diff --git a/.gitattributes b/.gitattributes
index 15b78d9bbf5057114735a362d74f640e77697307..50a40a52d57f664ad8665f3005e7bad5648c8930 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -10,3 +10,7 @@ model_output/incremental_1_logs/checkpoint-575/tokenizer.model filter=lfs diff=l
 model_output/incremental_1_logs/checkpoint-575/optimizer.pt filter=lfs diff=lfs merge=lfs -text
 model_output/incremental_1_logs/runs/Jul09_05-50-46_59575be58a1d/events.out.tfevents.1752040290.59575be58a1d.1.0 filter=lfs diff=lfs merge=lfs -text
 model_output/phi2_finetuned_logs/tokenizer.model filter=lfs diff=lfs merge=lfs -text
+model_output/phi2_finetuned_logs/runs/Jul08_09-43-04_730424d57e0c/events.out.tfevents.1751967877.730424d57e0c.1.0 filter=lfs diff=lfs merge=lfs -text
+model_output/phi2_finetuned_logs/runs/Jul08_10-04-11_bfa8fc5c5694/events.out.tfevents.1751969144.bfa8fc5c5694.1.0 filter=lfs diff=lfs merge=lfs -text
+model_output/phi2_finetuned_logs/runs/Jul07_09-03-41_137f970d26fa/events.out.tfevents.1751879087.137f970d26fa.1.0 filter=lfs diff=lfs merge=lfs -text
+phivenv/Lib/site-packages/charset_normalizer/md__mypyc.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
diff --git a/model_output/incremental_1_logs/checkpoint-575/adapter_model.safetensors b/model_output/incremental_1_logs/checkpoint-575/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..91791351e3d77c825c07b1dfe3873c34a854b643
--- /dev/null
+++ b/model_output/incremental_1_logs/checkpoint-575/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acad913dd6e9b753ddf765a86cc873b844a7ce42b4fbd377c87a591b38a5dd5d
+size 18034152
diff --git a/model_output/phi2_finetuned_logs/adapter_model.safetensors b/model_output/phi2_finetuned_logs/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..443d793016ae0e2265a2da8838886051b77649f5
--- /dev/null
+++ b/model_output/phi2_finetuned_logs/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af7f9bad7df6784c8bf332da8aaf3241d14a27570d81e50f2d6c6186a9d5f152
+size 18034152
diff --git a/model_output/phi2_finetuned_logs/runs/Jul07_09-03-41_137f970d26fa/events.out.tfevents.1751879087.137f970d26fa.1.0 b/model_output/phi2_finetuned_logs/runs/Jul07_09-03-41_137f970d26fa/events.out.tfevents.1751879087.137f970d26fa.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..c9e5ace698c4fa351d03bd34a971fcb0bdfa8d0b
--- /dev/null
+++ b/model_output/phi2_finetuned_logs/runs/Jul07_09-03-41_137f970d26fa/events.out.tfevents.1751879087.137f970d26fa.1.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f691ac02d6286f43e327869ade872e0d94fe8e5a39361551bc2ce4172721a05
+size 19583
diff --git a/model_output/phi2_finetuned_logs/runs/Jul08_09-43-04_730424d57e0c/events.out.tfevents.1751967877.730424d57e0c.1.0 b/model_output/phi2_finetuned_logs/runs/Jul08_09-43-04_730424d57e0c/events.out.tfevents.1751967877.730424d57e0c.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..2c96988ed8c574efe6e71390802a1ca979d20639
--- /dev/null
+++ b/model_output/phi2_finetuned_logs/runs/Jul08_09-43-04_730424d57e0c/events.out.tfevents.1751967877.730424d57e0c.1.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e6ce3714be6a862228f77ec1b6fbfd8b5d8a993b6b5533561c0c4b15cdc5921
+size 6362
diff --git a/model_output/phi2_finetuned_logs/runs/Jul08_10-04-11_bfa8fc5c5694/events.out.tfevents.1751969144.bfa8fc5c5694.1.0 b/model_output/phi2_finetuned_logs/runs/Jul08_10-04-11_bfa8fc5c5694/events.out.tfevents.1751969144.bfa8fc5c5694.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..c391166bd9a5c749dd2597e930ec8bc0f1112372
--- /dev/null
+++ b/model_output/phi2_finetuned_logs/runs/Jul08_10-04-11_bfa8fc5c5694/events.out.tfevents.1751969144.bfa8fc5c5694.1.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c003d4fdd3c674a5d2a85d5e87e1ac925d7baf986753b33b808f5d0a7da97ae8
+size 24227
diff --git a/phivenv/Lib/site-packages/charset_normalizer/md__mypyc.cp39-win_amd64.pyd b/phivenv/Lib/site-packages/charset_normalizer/md__mypyc.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..c8f19cdc09dea2729c5e1f2d84aab9ccc6a1949e
--- /dev/null
+++ b/phivenv/Lib/site-packages/charset_normalizer/md__mypyc.cp39-win_amd64.pyd
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09938fbd83e9ecf278c1da7f2ca3ad95ad0cd1ef39e82c1b4ecbac8c6b676ff9
+size 125440
diff --git a/phivenv/Lib/site-packages/distutils-precedence.pth b/phivenv/Lib/site-packages/distutils-precedence.pth
new file mode 100644
index 0000000000000000000000000000000000000000..10c404f6ad452c148c46a39e11ddd4bc58530d16
--- /dev/null
+++ b/phivenv/Lib/site-packages/distutils-precedence.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ea7ffef3fe2a117ee12c68ed6553617f0d7fd2f0590257c25c484959a3b7373
+size 152
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/INSTALLER b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/LICENSE.txt b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..42b6f17a65ef6636d00c93aefb514441145ecc94
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/LICENSE.txt
@@ -0,0 +1,37 @@
+NetworkX is distributed with the 3-clause BSD license.
+
+::
+
+   Copyright (C) 2004-2023, NetworkX Developers
+   Aric Hagberg
+   Dan Schult
+   Pieter Swart
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+     * Redistributions in binary form must reproduce the above
+       copyright notice, this list of conditions and the following
+       disclaimer in the documentation and/or other materials provided
+       with the distribution.
+
+     * Neither the name of the NetworkX Developers nor the names of its
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/METADATA b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..ac51fe019585ed14b015f89a8d8908ccc7c356c2
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/METADATA
@@ -0,0 +1,135 @@
+Metadata-Version: 2.1
+Name: networkx
+Version: 3.2.1
+Summary: Python package for creating and manipulating graphs and networks
+Author-email: Aric Hagberg
+Maintainer-email: NetworkX Developers
+Project-URL: Homepage, https://networkx.org/
+Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues
+Project-URL: Documentation, https://networkx.org/documentation/stable/
+Project-URL: Source Code, https://github.com/networkx/networkx
+Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math
+Platform: Linux
+Platform: Mac OSX
+Platform: Windows
+Platform: Unix
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
+Classifier: Topic :: Scientific/Engineering :: Information Analysis
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Physics
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE.txt
+Provides-Extra: default
+Requires-Dist: numpy >=1.22 ; extra == 'default'
+Requires-Dist: scipy !=1.11.0,!=1.11.1,>=1.9 ; extra == 'default'
+Requires-Dist: matplotlib >=3.5 ; extra == 'default'
+Requires-Dist: pandas >=1.4 ; extra == 'default'
+Provides-Extra: developer
+Requires-Dist: changelist ==0.4 ; extra == 'developer'
+Requires-Dist: pre-commit >=3.2 ; extra == 'developer'
+Requires-Dist: mypy >=1.1 ; extra == 'developer'
+Requires-Dist: rtoml ; extra == 'developer'
+Provides-Extra: doc
+Requires-Dist: sphinx >=7 ; extra == 'doc'
+Requires-Dist: pydata-sphinx-theme >=0.14 ; extra == 'doc'
+Requires-Dist: sphinx-gallery >=0.14 ; extra == 'doc'
+Requires-Dist: numpydoc >=1.6 ; extra == 'doc'
+Requires-Dist: pillow >=9.4 ; extra == 'doc'
+Requires-Dist: nb2plots >=0.7 ; extra == 'doc'
+Requires-Dist: texext >=0.6.7 ; extra == 'doc'
+Requires-Dist: nbconvert <7.9 ; extra == 'doc'
+Provides-Extra: extra
+Requires-Dist: lxml >=4.6 ; extra == 'extra'
+Requires-Dist: pygraphviz >=1.11 ; extra == 'extra'
+Requires-Dist: pydot >=1.4.2 ; extra == 'extra'
+Requires-Dist: sympy >=1.10 ; extra == 'extra'
+Provides-Extra: test
+Requires-Dist: pytest >=7.2 ; extra == 'test'
+Requires-Dist: pytest-cov >=4.0 ; extra == 'test'
+
+NetworkX
+========
+
+
+.. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main
+  :target: https://github.com/networkx/networkx/actions?query=workflow%3A%22test%22
+
+.. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg
+  :target: https://app.codecov.io/gh/networkx/networkx/branch/main
+
+.. image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square
+  :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22
+
+
+NetworkX is a Python package for the creation, manipulation,
+and study of the structure, dynamics, and functions
+of complex networks.
+
+- **Website (including documentation):** https://networkx.org
+- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss
+- **Source:** https://github.com/networkx/networkx
+- **Bug reports:** https://github.com/networkx/networkx/issues
+- **Report a security vulnerability:** https://tidelift.com/security
+- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html
+- **GitHub Discussions:** https://github.com/networkx/networkx/discussions
+
+Simple example
+--------------
+
+Find the shortest path between two nodes in an undirected graph:
+
+.. code:: pycon
+
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> G.add_edge("A", "B", weight=4)
+    >>> G.add_edge("B", "D", weight=2)
+    >>> G.add_edge("A", "C", weight=3)
+    >>> G.add_edge("C", "D", weight=4)
+    >>> nx.shortest_path(G, "A", "D", weight="weight")
+    ['A', 'B', 'D']
+
+Install
+-------
+
+Install the latest version of NetworkX::
+
+    $ pip install networkx
+
+Install with all optional dependencies::
+
+    $ pip install networkx[all]
+
+For additional details, please see `INSTALL.rst`.
+
+Bugs
+----
+
+Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
+Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_
+and create a pull request (PR). We welcome all changes, big or small, and we
+will help you make the PR if you are new to `git` (just ask on the issue and/or
+see `CONTRIBUTING.rst`).
+
+License
+-------
+
+Released under the 3-Clause BSD license (see `LICENSE.txt`)::
+
+   Copyright (C) 2004-2023 NetworkX Developers
+   Aric Hagberg
+   Dan Schult
+   Pieter Swart
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/RECORD b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..8c3ee9d034dcd55b6e336bfedb24182f8b726b7b
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/RECORD
@@ -0,0 +1,1137 @@
+networkx-3.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+networkx-3.2.1.dist-info/LICENSE.txt,sha256=ULWifLQ_eiDO3nqnuasgM1UuBBLJof3lHTiIXBQX6V8,1763
+networkx-3.2.1.dist-info/METADATA,sha256=tEByL1NhNlpdXiGfQDexQA_h5H6sFB1UMtQUJwDr3xQ,5232
+networkx-3.2.1.dist-info/RECORD,,
+networkx-3.2.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+networkx-3.2.1.dist-info/entry_points.txt,sha256=b0FW-zm-m9itB-Zkm7w_8c9yX9WGGTg-r_N_A32PAGs,87
+networkx-3.2.1.dist-info/top_level.txt,sha256=s3Mk-7KOlu-kD39w8Xg_KXoP5Z_MVvgB-upkyuOE4Hk,9
+networkx/__init__.py,sha256=WwK4KM7w30c5F2xUgs4N0ylwaixla1rJ-4qThcPnjho,1091
+networkx/__pycache__/__init__.cpython-39.pyc,,
+networkx/__pycache__/conftest.cpython-39.pyc,,
+networkx/__pycache__/convert.cpython-39.pyc,,
+networkx/__pycache__/convert_matrix.cpython-39.pyc,,
+networkx/__pycache__/exception.cpython-39.pyc,,
+networkx/__pycache__/lazy_imports.cpython-39.pyc,,
+networkx/__pycache__/relabel.cpython-39.pyc,,
+networkx/algorithms/__init__.py,sha256=Rz_AEhB6u0naGx7ejzbvXrMXixWM47ydmjnGOuofzqo,6512
+networkx/algorithms/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/__pycache__/asteroidal.cpython-39.pyc,,
+networkx/algorithms/__pycache__/boundary.cpython-39.pyc,,
+networkx/algorithms/__pycache__/bridges.cpython-39.pyc,,
+networkx/algorithms/__pycache__/chains.cpython-39.pyc,,
+networkx/algorithms/__pycache__/chordal.cpython-39.pyc,,
+networkx/algorithms/__pycache__/clique.cpython-39.pyc,,
+networkx/algorithms/__pycache__/cluster.cpython-39.pyc,,
+networkx/algorithms/__pycache__/communicability_alg.cpython-39.pyc,,
+networkx/algorithms/__pycache__/core.cpython-39.pyc,,
+networkx/algorithms/__pycache__/covering.cpython-39.pyc,,
+networkx/algorithms/__pycache__/cuts.cpython-39.pyc,,
+networkx/algorithms/__pycache__/cycles.cpython-39.pyc,,
+networkx/algorithms/__pycache__/d_separation.cpython-39.pyc,,
+networkx/algorithms/__pycache__/dag.cpython-39.pyc,,
+networkx/algorithms/__pycache__/distance_measures.cpython-39.pyc,,
+networkx/algorithms/__pycache__/distance_regular.cpython-39.pyc,,
+networkx/algorithms/__pycache__/dominance.cpython-39.pyc,,
+networkx/algorithms/__pycache__/dominating.cpython-39.pyc,,
+networkx/algorithms/__pycache__/efficiency_measures.cpython-39.pyc,,
+networkx/algorithms/__pycache__/euler.cpython-39.pyc,,
+networkx/algorithms/__pycache__/graph_hashing.cpython-39.pyc,,
+networkx/algorithms/__pycache__/graphical.cpython-39.pyc,,
+networkx/algorithms/__pycache__/hierarchy.cpython-39.pyc,,
+networkx/algorithms/__pycache__/hybrid.cpython-39.pyc,,
+networkx/algorithms/__pycache__/isolate.cpython-39.pyc,,
+networkx/algorithms/__pycache__/link_prediction.cpython-39.pyc,,
+networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-39.pyc,,
+networkx/algorithms/__pycache__/matching.cpython-39.pyc,,
+networkx/algorithms/__pycache__/mis.cpython-39.pyc,,
+networkx/algorithms/__pycache__/moral.cpython-39.pyc,,
+networkx/algorithms/__pycache__/node_classification.cpython-39.pyc,,
+networkx/algorithms/__pycache__/non_randomness.cpython-39.pyc,,
+networkx/algorithms/__pycache__/planar_drawing.cpython-39.pyc,,
+networkx/algorithms/__pycache__/planarity.cpython-39.pyc,,
+networkx/algorithms/__pycache__/polynomials.cpython-39.pyc,,
+networkx/algorithms/__pycache__/reciprocity.cpython-39.pyc,,
+networkx/algorithms/__pycache__/regular.cpython-39.pyc,,
+networkx/algorithms/__pycache__/richclub.cpython-39.pyc,,
+networkx/algorithms/__pycache__/similarity.cpython-39.pyc,,
+networkx/algorithms/__pycache__/simple_paths.cpython-39.pyc,,
+networkx/algorithms/__pycache__/smallworld.cpython-39.pyc,,
+networkx/algorithms/__pycache__/smetric.cpython-39.pyc,,
+networkx/algorithms/__pycache__/sparsifiers.cpython-39.pyc,,
+networkx/algorithms/__pycache__/structuralholes.cpython-39.pyc,,
+networkx/algorithms/__pycache__/summarization.cpython-39.pyc,,
+networkx/algorithms/__pycache__/swap.cpython-39.pyc,,
+networkx/algorithms/__pycache__/threshold.cpython-39.pyc,,
+networkx/algorithms/__pycache__/time_dependent.cpython-39.pyc,,
+networkx/algorithms/__pycache__/tournament.cpython-39.pyc,,
+networkx/algorithms/__pycache__/triads.cpython-39.pyc,,
+networkx/algorithms/__pycache__/vitality.cpython-39.pyc,,
+networkx/algorithms/__pycache__/voronoi.cpython-39.pyc,,
+networkx/algorithms/__pycache__/walks.cpython-39.pyc,,
+networkx/algorithms/__pycache__/wiener.cpython-39.pyc,,
+networkx/algorithms/approximation/__init__.py,sha256=zf9NM64g-aZwEGqI5C0DpU5FML2GrkaaQsO6SW85atE,1177
+networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc,,
+networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc,,
+networkx/algorithms/approximation/clique.py,sha256=y4AeIvmGpMmM0kKRaFQKEORyUVmwakM_Vqz0Mdgnd94,7674
+networkx/algorithms/approximation/clustering_coefficient.py,sha256=jbWKI0fd79_vH4lHkzPXU1cyD8mb9MDej3v9OKQnv5s,2084
+networkx/algorithms/approximation/connectivity.py,sha256=PbGbwOk3dE4phai74vxz7pj1lK3q_gbYrNtMltu2MDw,13107
+networkx/algorithms/approximation/distance_measures.py,sha256=9MUKk23jP7klSgN718h7ttVM_xKPE4dMkxop7BvjkcU,5593
+networkx/algorithms/approximation/dominating_set.py,sha256=oGr5hRltwqd3JltGX-LIC7Sq7LGjltfdUGmBXWdAR7s,4214
+networkx/algorithms/approximation/kcomponents.py,sha256=AN9co8R7Lmgx5N_CbSrSmkQhfFbfbsJtwrUxSDBd284,13282
+networkx/algorithms/approximation/matching.py,sha256=1xfEMDlvcV8J78kgUhbC9FkaTnwC6mjYtNesGTYgmVg,1170
+networkx/algorithms/approximation/maxcut.py,sha256=rWHElMJvy5g4Yy_Tk11tlFFwQ16AV07K6rW1bIwWHTU,3664
+networkx/algorithms/approximation/ramsey.py,sha256=xOZDmJqCm-ya7utQhQOmVM_5LO-O_1e5UDw-YIWr9rY,1353
+networkx/algorithms/approximation/steinertree.py,sha256=tE-4_f1fPpR_hvCudJlzNzkYq_5kyT4OxxJBbfErWJ8,7487
+networkx/algorithms/approximation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc,,
+networkx/algorithms/approximation/tests/test_approx_clust_coeff.py,sha256=PGOVEKf2BcJu1vvjZrgTlBBpwM8V6t7yCANjyS9nWF0,1171
+networkx/algorithms/approximation/tests/test_clique.py,sha256=JZ_ja03aVU7vnZ42Joy1ze0vjdcm_CnDhD96Z4W_Dcc,3022
+networkx/algorithms/approximation/tests/test_connectivity.py,sha256=gDG6tsgP3ux7Dgu0x7r0nso7_yknIxicV42Gq0It5pc,5952
+networkx/algorithms/approximation/tests/test_distance_measures.py,sha256=GSyupA_jqSc_pLPSMnZFNcBgZc8-KFWgt6Q7uFegTqg,2024
+networkx/algorithms/approximation/tests/test_dominating_set.py,sha256=l4pBDY7pK7Fxw-S4tOlNcxf-j2j5GpHPJ9f4TrMs1sI,2686
+networkx/algorithms/approximation/tests/test_kcomponents.py,sha256=tTljP1FHzXrUwi-oBz5AQcibRw1NgR4N5UE0a2OrOUA,9346
+networkx/algorithms/approximation/tests/test_matching.py,sha256=nitZncaM0605kaIu1NO6_5TFV2--nohUCO46XTD_lnM,186
+networkx/algorithms/approximation/tests/test_maxcut.py,sha256=HDFNx896WYi7do42P6C5tGTZsBpiqx7sUWm_2riE3nk,2426
+networkx/algorithms/approximation/tests/test_ramsey.py,sha256=h36Ol39csHbIoTDBxbxMgn4371iVUGZ3a2N6l7d56lI,1143
+networkx/algorithms/approximation/tests/test_steinertree.py,sha256=H6IKKl1kFeH96bJaI8CgSkXBJz34ceCft8DA7HNG-Mk,6901
+networkx/algorithms/approximation/tests/test_traveling_salesman.py,sha256=El7VoCuHfmb_DQxlQgo5k9L6lL6U4DBu70BgJ0REJyg,30697
+networkx/algorithms/approximation/tests/test_treewidth.py,sha256=MWFFcmjO0QxM8FS8iXSCtfGnk6eqG2kFyv1u2qnSeUo,9096
+networkx/algorithms/approximation/tests/test_vertex_cover.py,sha256=FobHNhG9CAMeB_AOEprUs-7XQdPoc1YvfmXhozDZ8pM,1942
+networkx/algorithms/approximation/traveling_salesman.py,sha256=FbysLItH41SzBjKIoByYqcvuqIboO8HwNYCO2DJjQ4g,54465
+networkx/algorithms/approximation/treewidth.py,sha256=MRGfLtAanCzDk1G6I6jTbC6MKn6lYreIe9XQdfRXGHE,8148
+networkx/algorithms/approximation/vertex_cover.py,sha256=s7s5v4TGqIlvgTAg2FVxRRUSA2BEp7szZg7FS_UpWAA,2798
+networkx/algorithms/assortativity/__init__.py,sha256=ov3HRRbeYB_6Qezvxp1OTl77GBpw-EWkWGUzgfT8G9c,294
+networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc,,
+networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc,,
+networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc,,
+networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc,,
+networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc,,
+networkx/algorithms/assortativity/connectivity.py,sha256=O1b3Iky0hlpdM6_QBmBNFfF4XeUsKDMj8fCid_bBRQE,4216
+networkx/algorithms/assortativity/correlation.py,sha256=6XUlbqlBgLyb8GDKsSvIrSgsL47Ti1jbLQoIdRVAj_k,8654
+networkx/algorithms/assortativity/mixing.py,sha256=adB-iqzA_lhjhnoOOZG9qK4ghRTZCjONYN9FePYqcj8,7551
+networkx/algorithms/assortativity/neighbor_degree.py,sha256=H1XQ9BenXxxHK_e6ZWtdIb3xYwYCrbtqEQ69Gasm7cA,5278
+networkx/algorithms/assortativity/pairs.py,sha256=qHALwEx_Q8N1B2ZszX8vs2BK2_0kc4lmbth4kMU6Nog,3393
+networkx/algorithms/assortativity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc,,
+networkx/algorithms/assortativity/tests/base_test.py,sha256=MNeQMLA3oBUCM8TSyNbBQ_uW0nDc1GEZYdNdUwePAm4,2651
+networkx/algorithms/assortativity/tests/test_connectivity.py,sha256=Js841GQLYTLWvc6xZhnyqj-JtyrnS0ska1TFYntxyXA,4978
+networkx/algorithms/assortativity/tests/test_correlation.py,sha256=1_D9GjLDnlT8Uy28lUn2fS1AHp2XBwiMpIl2OhRNDXk,5069
+networkx/algorithms/assortativity/tests/test_mixing.py,sha256=u-LIccNn-TeIAM766UtzUJQlY7NAbxF4EsUoKINzmlo,6820
+networkx/algorithms/assortativity/tests/test_neighbor_degree.py,sha256=ODP2M8jCaFr_l3ODwpwaz20-KqU2IFaEfJRBK53mpE8,3968
+networkx/algorithms/assortativity/tests/test_pairs.py,sha256=t05qP_-gfkbiR6aTLtE1owYl9otBSsuJcRkuZsa63UQ,3008
+networkx/algorithms/asteroidal.py,sha256=ARFht3oQvn95xCaaBEhy42djMIx4BuqsNf8VVlwBCEI,5852
+networkx/algorithms/bipartite/__init__.py,sha256=NQtAEpZ0IkjGVwfUbOzD7eoPLwulb_iZfh7-aDnyPWo,3826
+networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc,,
+networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc,,
+networkx/algorithms/bipartite/basic.py,sha256=iqgNX-FUDwK2owu1APFTu6ldlw6QE2PaOuNiWEgHafQ,8350
+networkx/algorithms/bipartite/centrality.py,sha256=vkjnOLv5CQtfTOFpa2YhFZWRnMBFUCetGn1w7akAvq8,9144
+networkx/algorithms/bipartite/cluster.py,sha256=S9h8lu-usXFcXEJf6qUxZinf0LneqvKnEiUi9YKp7bo,6925
+networkx/algorithms/bipartite/covering.py,sha256=8pQEStjAGygcu83Cz88RfNAifUV7x8pC84LE2wWapsY,2160
+networkx/algorithms/bipartite/edgelist.py,sha256=aa5sHvwCLe0Lk7BK58tR5vMNjpnlfSaNSs6UY6G5vbc,11317
+networkx/algorithms/bipartite/extendability.py,sha256=CvF0zI__9899cMkq40vu_FfEcU-OeyCB4C2bHtMxxgE,3973
+networkx/algorithms/bipartite/generators.py,sha256=Hj-kPfih-bd74gHrZFpyeWtrMQbKS1uQyYiBqv5RxKQ,20231
+networkx/algorithms/bipartite/matching.py,sha256=kXgpv14FuL6k4KrKN68Z85dkKNgfBmxahTMn4N8aVoI,21620
+networkx/algorithms/bipartite/matrix.py,sha256=w9P7y4oS7vUFdv2dRQHAosuFDrH34YgMgUffQCSFRDE,6127
+networkx/algorithms/bipartite/projection.py,sha256=L9mkbufsE885rGhJ9t-7p-TqowBtUmrL8Zm95LeBygQ,17165
+networkx/algorithms/bipartite/redundancy.py,sha256=T2kDtj1xpSudwelE_ZnWFHnIXneZAMaXvv1m7pVF3Io,3397
+networkx/algorithms/bipartite/spectral.py,sha256=xxkLlaSByMJUP4Kz-XfuRhzTse_DqqsOtGzfiKIgdXc,1880
+networkx/algorithms/bipartite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc,,
+networkx/algorithms/bipartite/tests/test_basic.py,sha256=gzbtsQqPi85BznX5REdGBBJVyr9aH4nO06c3eEI4634,4291
+networkx/algorithms/bipartite/tests/test_centrality.py,sha256=PABPbrIyoAziEEQKXsZLl2jT36N8DZpNRzEO-jeu89Y,6362
+networkx/algorithms/bipartite/tests/test_cluster.py,sha256=O0VsPVt8vcY_E1FjjLJX2xaUbhVViI5MP6_gLTbEpos,2801
+networkx/algorithms/bipartite/tests/test_covering.py,sha256=EGVxYQsyLXE5yY5N5u6D4wZq2NcZe9OwlYpEuY6DF3o,1221
+networkx/algorithms/bipartite/tests/test_edgelist.py,sha256=UE7vm3iZshnlzIrcupso48en0kncxGUPU7XTQskgowg,7996
+networkx/algorithms/bipartite/tests/test_extendability.py,sha256=MsiRLfldka3Cz_h21BwPxnEOuKChntuI6mVCnIFnSs0,6780
+networkx/algorithms/bipartite/tests/test_generators.py,sha256=GLMThTKIfZ96NwTxIL0P0o0OAESZFfnySRkRjtKhao8,12794
+networkx/algorithms/bipartite/tests/test_matching.py,sha256=wFw095skCjW5YvQAnIie8mLacECVt0yUoeJFSj8ONAk,11972
+networkx/algorithms/bipartite/tests/test_matrix.py,sha256=EoqQKTMcPPPPUZYTzc-AAtl5F77qT0X3FI3E1tYppxM,2900
+networkx/algorithms/bipartite/tests/test_project.py,sha256=FBjkys3JYYzEG4aq_CsQrtm41edZibWI_uDAQ0b4wqM,15134
+networkx/algorithms/bipartite/tests/test_redundancy.py,sha256=ddjUzOQ0gkiWBLtVwVFYTJydaIdW3qAc4BCVscxj7-Q,919
+networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py,sha256=1jGDgrIx3-TWOCNMSC4zxmZa7LHyMU69DXh3h12Bjag,2358
+networkx/algorithms/boundary.py,sha256=GNuNDL280F7RXMzgkhUqFx-Zcg1LtP_Q0bNCYbMWIYU,5330
+networkx/algorithms/bridges.py,sha256=MuH_zEBqeSuyou8wszhnEzjJrUZSN-PpNtaScqUmR6E,6075
+networkx/algorithms/centrality/__init__.py,sha256=Er3YoYoj76UfY4P6I0L-0fCQkO7mMU0b3NLsTT2RGWI,558
+networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc,,
+networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc,,
+networkx/algorithms/centrality/betweenness.py,sha256=Uy9LCiUxzx1Jvgi7SgV-4AEG1BfvDy2ypWg6Xzfd0_8,14374
+networkx/algorithms/centrality/betweenness_subset.py,sha256=SW7uh0SyGhD_99gwAIGOHrJ9rlO3jXw7xHi-tOHmspE,9327
+networkx/algorithms/centrality/closeness.py,sha256=97qc3gCkitgyLh66sYcdpSRnl7cdUrDgRBP49jDlNNw,10252
+networkx/algorithms/centrality/current_flow_betweenness.py,sha256=eDoDGCVR1PIL5hIY07xRJ6Ze74S4v1HUGJGZ9DbfQW8,11871
+networkx/algorithms/centrality/current_flow_betweenness_subset.py,sha256=QAxgfH20BkeoGgjKPVHVRDrJ6kkGQ1MNmZIVV5baWaM,8046
+networkx/algorithms/centrality/current_flow_closeness.py,sha256=jId4MzTctT0NIJOrzTPsd_gomLSMAg_1SXuXGDZja18,3351
+networkx/algorithms/centrality/degree_alg.py,sha256=xwK263egt-sy-BBxVqL9CE7uF22UhJQls8NlVu3QfZU,3881
+networkx/algorithms/centrality/dispersion.py,sha256=Eld3WK97coVbsHjAJ3ewYI1vsJ0c4Nm3Yrznbec5G8c,3627
+networkx/algorithms/centrality/eigenvector.py,sha256=o0qmWOiMf18Hofdw0ACaAO3TASeJC89R1meCr3ILJi0,12738
+networkx/algorithms/centrality/flow_matrix.py,sha256=G7o6qTnkOlhUZ-DowDu5Xb0vQAiulXcP_veiuYpaPdU,3829
+networkx/algorithms/centrality/group.py,sha256=XBfaGSIgVc-an4Ecqvokd1smETuQ_lUTAtOduKA8YDg,27866
+networkx/algorithms/centrality/harmonic.py,sha256=0eFa5Kv-7Ff_TFuygH753cnQjDqeImkPgrKohtXhGFE,2626
+networkx/algorithms/centrality/katz.py,sha256=K3KVHs3RMGjywLa5tvwEk4xqoBedgytEXINcSjHzy4s,10941
+networkx/algorithms/centrality/laplacian.py,sha256=BHdSIiFBjqZGijThqYQHS3J2-KHnd8DdzQDwAXJwyt0,5403
+networkx/algorithms/centrality/load.py,sha256=6lFI7KqDsVit1QF1UXqrx5G_frELnG8UKOmqtETC5bs,6850
+networkx/algorithms/centrality/percolation.py,sha256=PcWtbDaXvTLeqr1PCbnIHjoD8XcfQTp1meff7FHU1As,4415
+networkx/algorithms/centrality/reaching.py,sha256=nMKAOEbpHajkGKIYxGYEhO70fI2KXeReVsJgaRplN0E,7017
+networkx/algorithms/centrality/second_order.py,sha256=hFk_RFwYpIRlh_iGPO-ZSNzp6Anb73jod_fLYTMvfYk,4966
+networkx/algorithms/centrality/subgraph_alg.py,sha256=BIxHyH7E3I1Ri2dDnnFPy9IOc65ouwk8Y_jc5JMEGRM,9472
+networkx/algorithms/centrality/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-39.pyc,,
+networkx/algorithms/centrality/tests/test_betweenness_centrality.py,sha256=pKoPAP1hnQSgrOxYeW5-LdUiFDANiwTn_NdOdgccbo8,26795
+networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py,sha256=HrHMcgOL69Z6y679SbqZIjkQOnqrYSz24gt17AJ9q-o,12554
+networkx/algorithms/centrality/tests/test_closeness_centrality.py,sha256=XWZivyLjxYlF41U4ktUmvULC2PMvxKs2U6BHDXRZVdE,10209
+networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py,sha256=VOxx1A7iSGtdEbzJYea_sW_Hv0S71-oo1CVX7Rqd5RY,7870
+networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py,sha256=JfRGgPuiF-vJu5fc2_pcJYREEboxcK_dmy-np39c4Aw,5839
+networkx/algorithms/centrality/tests/test_current_flow_closeness.py,sha256=vflQeoNKngrGUiRb3XNlm2X9wR4vKgMSW_sCyMUCQi8,1379
+networkx/algorithms/centrality/tests/test_degree_centrality.py,sha256=TxD7UBtezF4RCdbCAuTsSB5lcFOQZrGnLOuCMa0XWY0,4105
+networkx/algorithms/centrality/tests/test_dispersion.py,sha256=ROgl_5bGhcNXonNW3ylsvUcA0NCwynsQu_scic371Gw,1959
+networkx/algorithms/centrality/tests/test_eigenvector_centrality.py,sha256=MsHKkQX7oip4v0kF28K1RjtKqxSNVykiSjg8wT20YyE,4897
+networkx/algorithms/centrality/tests/test_group.py,sha256=YmWifoTgw2gSS5BnA9G2T_Voauk_WG6v90JrZEt-Kjk,8686
+networkx/algorithms/centrality/tests/test_harmonic_centrality.py,sha256=wYP0msmB5hh5OMIxPl9t0G4QSpG3Brxw98Kh9BrRoag,3658
+networkx/algorithms/centrality/tests/test_katz_centrality.py,sha256=JL0bZZsJe2MQFL6urXgY82wCAwucUvhjaShYZPxpL6U,11240
+networkx/algorithms/centrality/tests/test_laplacian_centrality.py,sha256=vY-NULtr_U_GxUMwfAZB-iccxIRTiqqUN4Q8HRNpzSo,5916
+networkx/algorithms/centrality/tests/test_load_centrality.py,sha256=Vv3zSW89iELN-8KNbUclmkhOe1LzKdF7U_w34nYovIo,11343
+networkx/algorithms/centrality/tests/test_percolation_centrality.py,sha256=ycQ1fvEZZcWAfqL11urT7yHiEP77usJDSG25OQiDM2s,2591
+networkx/algorithms/centrality/tests/test_reaching.py,sha256=sqQUPspoiWxs9tD77UwngBkMVFYjRzhayVxPqX9_XbY,4143
+networkx/algorithms/centrality/tests/test_second_order_centrality.py,sha256=ce0wQ4T33lu23wskzGUnBS7X4BSODlvAX1S5KxlLzOA,1999
+networkx/algorithms/centrality/tests/test_subgraph.py,sha256=vhE9Uh-_Hlk49k-ny6ORHCgqk7LWH8OHIYOEYM96uz0,3729
+networkx/algorithms/centrality/tests/test_trophic.py,sha256=AzV6rwcTa4b4tcenoKh95o6VF-z7w75l81ZOdhhi6yE,8705
+networkx/algorithms/centrality/tests/test_voterank.py,sha256=7Z9aQYKqEw_txBbWTz1FZWJzUmhjlMfDFSRIKHBdkOk,1692
+networkx/algorithms/centrality/trophic.py,sha256=ay_R2GtxxfP5muSoxSETt6wdqYka14J1f1Z9zafY790,4654
+networkx/algorithms/centrality/voterank_alg.py,sha256=UG71jAEm4b0vqj6ZQ-so8yqQtdxDoK8XI5CWTdjAyhg,3227
+networkx/algorithms/chains.py,sha256=wUNxO0v_nH9m5efeV0IXzog_5EbBl0PkVzgDJoCAJts,6964
+networkx/algorithms/chordal.py,sha256=H5fdhTaX5UzKWVj4mQQd7cjj_oj84R91rUGRiuug_Gk,13285
+networkx/algorithms/clique.py,sha256=gM-ksRAEX1eDr_Irk85uuUGVKRpFIYijeOo385mVsxs,25802
+networkx/algorithms/cluster.py,sha256=qkwVMieIgXyK2u01cYD9xrZtgpDAODmH3aggvUxxPVc,20281
+networkx/algorithms/coloring/__init__.py,sha256=P1cmqrAjcaCdObkNZ1e6Hp__ZpxBAhQx0iIipOVW8jg,182
+networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc,,
+networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc,,
+networkx/algorithms/coloring/equitable_coloring.py,sha256=JnL_TM3sTewENSKkbHIOOfNoaqXYkoEjzmfgf3xD9C8,16279
+networkx/algorithms/coloring/greedy_coloring.py,sha256=Tuo215orZ6k7znMd9wkyv3irZNAl1dyEryi72mC_im4,20170
+networkx/algorithms/coloring/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc,,
+networkx/algorithms/coloring/tests/test_coloring.py,sha256=A2cAG--i7pTVolIK96mxNuCTtLvhbLVRqJ4MAfWUBEQ,23712
+networkx/algorithms/communicability_alg.py,sha256=8omQmig1RyfVYfB_bfX40AX3Vg4PD4D-cmjelQzzKFc,4536
+networkx/algorithms/community/__init__.py,sha256=gKUySRds_lxaCw0kEpPJ1vluQwN4cV3ayt4U_8fok_M,1125
+networkx/algorithms/community/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/asyn_fluid.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/centrality.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/community_utils.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/kclique.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/kernighan_lin.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/label_propagation.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/louvain.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/lukes.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/modularity_max.cpython-39.pyc,,
+networkx/algorithms/community/__pycache__/quality.cpython-39.pyc,,
+networkx/algorithms/community/asyn_fluid.py,sha256=Qn8tNzdrXA1DVVHuZi-YjvBsUSbj3J6WwekrSVXBs74,5912
+networkx/algorithms/community/centrality.py,sha256=AEuGeTP_vWSyWr4vC3r-63cTfQ7mV2vDTqv_vZUkoxQ,6631
+networkx/algorithms/community/community_utils.py,sha256=u4q9DSo_QyROG7Qci2-Cvphu4n_VM8AeYXYKkXQXxws,903
+networkx/algorithms/community/kclique.py,sha256=tG0GOot8kY-wnaGA0XdNo0VKKoa1hJprMqXszcA00Pc,2456
+networkx/algorithms/community/kernighan_lin.py,sha256=-pQEXeOBE6JnHqMo-5M6igzGcBNbeOWK8AQ51joeN-E,4345
+networkx/algorithms/community/label_propagation.py,sha256=pcGwq8qhZQPK7LSrmsL54lf0ljC4PzBQaE2s_vvsWmU,11846
+networkx/algorithms/community/louvain.py,sha256=smccDNEHuRC3aqBJJ7ijKTQQ8vy1c-zcSuPaeqoQqDw,14764
+networkx/algorithms/community/lukes.py,sha256=OxwTxVKYNEd4evk4htBNDw_IeNujUIvPuydAfT-ewZk,8086
+networkx/algorithms/community/modularity_max.py,sha256=mcQxD2iQduY8H2-Ep3Agg7BSb-UTUqzOEcHm-424sC8,18020
+networkx/algorithms/community/quality.py,sha256=G7ogU-CYh-78EWGUyPKKR55K0iFrZclHawzy9gvBW-4,11919
+networkx/algorithms/community/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/community/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_quality.cpython-39.pyc,,
+networkx/algorithms/community/tests/__pycache__/test_utils.cpython-39.pyc,,
+networkx/algorithms/community/tests/test_asyn_fluid.py,sha256=5DDArgCUSRVXrlG21R5Yu6Gg96xsivqvEib17VGOZLM,3057
+networkx/algorithms/community/tests/test_centrality.py,sha256=ADU1mFn7yl9kTtQjOkfPtjpmkBR_i_6hwbVkWh5qZmw,2931
+networkx/algorithms/community/tests/test_kclique.py,sha256=iA0SBqwbDfaD2u7KM6ccs6LfgAQY_xxrnW05UIT_tFA,2413
+networkx/algorithms/community/tests/test_kernighan_lin.py,sha256=s8bK53Y1a87zvlZ1AJE-QJ2vItnbscSOlHQSrMpetGI,2709
+networkx/algorithms/community/tests/test_label_propagation.py,sha256=uOyx9-rLQCNidVwJ5EcjAlAOubjkK6HooZff5CCYki4,7870
+networkx/algorithms/community/tests/test_louvain.py,sha256=m8TQDH3fX2ygvWVn-mtP3EEmi1F7JF-J_WYuSAxvGXs,7257
+networkx/algorithms/community/tests/test_lukes.py,sha256=f_JU-EzY6PwXEkPN8kk5_3NVg6phlX0nrj1f57M49lk,3961
+networkx/algorithms/community/tests/test_modularity_max.py,sha256=mqtalSff4cmpAPyOExOolfICOE7YuOtHA3BqT84eZlg,10365
+networkx/algorithms/community/tests/test_quality.py,sha256=_kbOlYD1mpPduNQU1wJx58we6Z8CbmQ8wsDwOqTE4hg,5274
+networkx/algorithms/community/tests/test_utils.py,sha256=r_YEdGUaGZo8B16FxzocmkgpRrWgqyN7ehvx_qFiYu4,706
+networkx/algorithms/components/__init__.py,sha256=Dt74KZWp_cJ_j0lL5hd_S50_hia5DKcC2SjuRnubr6M,173
+networkx/algorithms/components/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/components/__pycache__/attracting.cpython-39.pyc,,
+networkx/algorithms/components/__pycache__/biconnected.cpython-39.pyc,,
+networkx/algorithms/components/__pycache__/connected.cpython-39.pyc,,
+networkx/algorithms/components/__pycache__/semiconnected.cpython-39.pyc,,
+networkx/algorithms/components/__pycache__/strongly_connected.cpython-39.pyc,,
+networkx/algorithms/components/__pycache__/weakly_connected.cpython-39.pyc,,
+networkx/algorithms/components/attracting.py,sha256=DYv4WYi7o65w2gszDcNVPlxPYDESDA_r0Z4gDzfpEDA,2699
+networkx/algorithms/components/biconnected.py,sha256=6GRTNyPgwvboDpUdjA9GODDa9vtxTNELEirHkcDHuXs,12765
+networkx/algorithms/components/connected.py,sha256=CiwwhpZo_ppuSCm63cMkm64IJybY_OAOxewWCaGUU7s,4312
+networkx/algorithms/components/semiconnected.py,sha256=M_bCya75ayQONDqv4HCfV8fAXPITAQbP7pdOl7mt8BQ,2025
+networkx/algorithms/components/strongly_connected.py,sha256=EoxDU6BDAp11v57vQvsiQGmZ_1C9iszd4ZDutN4KWAc,11712
+networkx/algorithms/components/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/components/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-39.pyc,,
+networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-39.pyc,,
+networkx/algorithms/components/tests/__pycache__/test_connected.cpython-39.pyc,,
+networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-39.pyc,,
+networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-39.pyc,,
+networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-39.pyc,,
+networkx/algorithms/components/tests/test_attracting.py,sha256=b3N3ZR9E5gLSQWGgaqhcRfRs4KBW6GnnkVYeAjdxC_o,2243
+networkx/algorithms/components/tests/test_biconnected.py,sha256=N-J-dgBgI77ytYUUrXjduLxtDydH7jS-af98fyPBkYc,6036
+networkx/algorithms/components/tests/test_connected.py,sha256=BTbxVcorGH8wKVXOO7D3bn0WR8lXK-Kijm-XDmQhiMY,3983
+networkx/algorithms/components/tests/test_semiconnected.py,sha256=q860lIxZF5M2JmDwwdzy-SGSXnrillOefMx23GcJpw0,1792
+networkx/algorithms/components/tests/test_strongly_connected.py,sha256=66c4bPIdcl1hKEZAY5Wjpglk_mrcVCoDxaKBOaZz754,6639
+networkx/algorithms/components/tests/test_weakly_connected.py,sha256=yi23wxW2Vw6JOMqaWMEuqNRxnleriuAQrZ5JGWE48Jk,2887
+networkx/algorithms/components/weakly_connected.py,sha256=mDxdyU7oGqWTYWY0Rh_VRbR5hcMFhy6yXFb_W20LkxU,4366
+networkx/algorithms/connectivity/__init__.py,sha256=VuUXTkagxX-tHjgmeYJ3K4Eq_luK6kSpv1nZwiwGFd8,281
+networkx/algorithms/connectivity/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/connectivity.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/cuts.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-39.pyc,,
+networkx/algorithms/connectivity/__pycache__/utils.cpython-39.pyc,,
+networkx/algorithms/connectivity/connectivity.py,sha256=X5tB-FevO5B-514-zb3LoaSOHlcBX0ockB5fBdh2E58,29912
+networkx/algorithms/connectivity/cuts.py,sha256=vCr5z2lvAa4cYIAhmnL-cYr7jVRL5a0TbbrV3Qb_xtQ,23183
+networkx/algorithms/connectivity/disjoint_paths.py,sha256=rQ1qZepPW4j0RnzMefaFtFbd4hsnjZ6tpiUSQwEpDxE,14852
+networkx/algorithms/connectivity/edge_augmentation.py,sha256=IJmZg75CiEmpIE0tQyFzbd6ZKFKGH--hBs7yuYMLzAA,43988
+networkx/algorithms/connectivity/edge_kcomponents.py,sha256=8jQ-ba3qxdCRK8dFDTAAcqG55-vOfJ_smRXeQAIw6FU,20709
+networkx/algorithms/connectivity/kcomponents.py,sha256=Ax0v4yudKFbkuzmek2TUEh3UPFIPoy6gJ7N4ZtUiO-A,8166
+networkx/algorithms/connectivity/kcutsets.py,sha256=rtSXzS7uIaNewh7RT_-lukXvr48_Cdl56VKalb8bQ50,9423
+networkx/algorithms/connectivity/stoerwagner.py,sha256=RW_Zx4wsdikYH8UB34zLDVLBBfpPQ4UBSOh_oYwhkMI,5375
+networkx/algorithms/connectivity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-39.pyc,,
+networkx/algorithms/connectivity/tests/test_connectivity.py,sha256=eSmsi8uQk6MI591JgtSu2elIusb08bmSZS0h9gxb76I,15027
+networkx/algorithms/connectivity/tests/test_cuts.py,sha256=4F8seWb-sPDDjjVMkh14gst5UQa5f-zDkCsZIdJjVzo,10353
+networkx/algorithms/connectivity/tests/test_disjoint_paths.py,sha256=NLHReLoXSKoA6KPBNRbjF84ktg5PEaaktIj2AII3SDY,8392
+networkx/algorithms/connectivity/tests/test_edge_augmentation.py,sha256=d3ymFHyY2G4cpy1Y6wu4ze339qfF2LRp2HmGAIVjnMM,15731
+networkx/algorithms/connectivity/tests/test_edge_kcomponents.py,sha256=CZ26Dy91WOUqhw1X73mqLGX-WHWzBBIeBCgrp6KK4Zo,16453
+networkx/algorithms/connectivity/tests/test_kcomponents.py,sha256=ohoSX8GACeszRZdzTiNuWXSFitfU9DzP0hqllS2gvMU,8554
+networkx/algorithms/connectivity/tests/test_kcutsets.py,sha256=TU6vl9cVtl7GstL2OrPGwVX2PY1R_AGQ6lJ9QQX5UBQ,8458
+networkx/algorithms/connectivity/tests/test_stoer_wagner.py,sha256=A291C30_t2CI1erPCqN1W0DoAj3zqNA8fThPIj4Rku0,3011
+networkx/algorithms/connectivity/utils.py,sha256=8h29TgBEeaZbF_4OFNgtY2XLqURD_va_wezmz709Qfs,3168
+networkx/algorithms/core.py,sha256=mNKH8fwCgbCbaQAcIdZq8Dx9p_bsHiyQn4fAXOdR5K4,15990
+networkx/algorithms/covering.py,sha256=4SiBc9eJi4vQ0N5juRfw6atcyjH1xB_iSWwT6WOZhYk,5290
+networkx/algorithms/cuts.py,sha256=VSEhUHwqRFhfS70L-PzsUGEzN09uhC69B-5hwSixj7A,9960
+networkx/algorithms/cycles.py,sha256=eHduXG1NbfMOIT-RNDWHMyDeRiWC-3IAOokh0kyIDhs,43080
+networkx/algorithms/d_separation.py,sha256=2l6sRqNEldQQltIlVo7lK6ew115PYLT0OJcCjeQBnJY,15440
+networkx/algorithms/dag.py,sha256=1LkfG8kYN-dAjXk21Tmo7eCr9FaYpp4kMX37JCktlY4,39144
+networkx/algorithms/distance_measures.py,sha256=pMMLUutcc93wdbTfKkltEic7dcj5st95eEPpuxtHiNc,29136
+networkx/algorithms/distance_regular.py,sha256=KksQ9jiqigD5DzD63DXmxoeV8RAj1TF7hjyi9FLjbws,6914
+networkx/algorithms/dominance.py,sha256=wO5FnplVOSkPBdFSBcPQMgt-0bykxaEtXmdqRjGg3d8,3422
+networkx/algorithms/dominating.py,sha256=lA9lP6SXtjZsUPyKM58Uflgp58aSqfveTeoe9aQuy7Y,2675
+networkx/algorithms/efficiency_measures.py,sha256=OGkRnD5lrXUqr4TAPW9f_y2g89k9V_-L1EkvWy9Yibo,4786
+networkx/algorithms/euler.py,sha256=hf2HPmE6GkRm6DhG0-DD4wIJmvnAXy__ddXx2BGqsBU,14160
+networkx/algorithms/flow/__init__.py,sha256=rVtMUy6dViPLewjDRntmn15QF0bQwiDdQbZZx9j7Drc,341
+networkx/algorithms/flow/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/capacityscaling.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/edmondskarp.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/gomory_hu.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/maxflow.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/mincost.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/networksimplex.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/preflowpush.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-39.pyc,,
+networkx/algorithms/flow/__pycache__/utils.cpython-39.pyc,,
+networkx/algorithms/flow/boykovkolmogorov.py,sha256=gJFnK5qZMg8xMWs2-aGx5-LLM25C48x2IPy-_50V6_c,13435
+networkx/algorithms/flow/capacityscaling.py,sha256=G4wdqfhQ4Gf7Fx3Eoh5_DUnpzD_qOT-8yWfaZ1dbbWA,14459
+networkx/algorithms/flow/dinitz_alg.py,sha256=mtSov40Oay_kz2v381MkOp5OSpDhKiH2OSINkrImsE0,7310
+networkx/algorithms/flow/edmondskarp.py,sha256=iafmZIMPO8euDc7uJQ1dg84s4a9OzK4tKMthF2jQoeo,8292
+networkx/algorithms/flow/gomory_hu.py,sha256=5fEaPaTi9_ox7CarltPwqSEGnF3OyxJJfWf04g-Aa50,6320
+networkx/algorithms/flow/maxflow.py,sha256=xyVgIMRtxRSUHZb-4txwc78ZEIFixzCQffG5NxioR98,22809
+networkx/algorithms/flow/mincost.py,sha256=JA6lLUmQ-UyxrdWdzbS3x8oK3t4MaPPBf_44Pgor9yA,12248
+networkx/algorithms/flow/networksimplex.py,sha256=eimOUJ4n2-jRBN8LgPoeuFrU38O2JRCB1O0uEkG7wkg,25175
+networkx/algorithms/flow/preflowpush.py,sha256=wIl2b0MpnhunZb4HpShxMVy1NgPggsAGF7chP0crKKw,15823
+networkx/algorithms/flow/shortestaugmentingpath.py,sha256=7vfa73BxJ6cHfzTy6ibXoD8DuOjK2lgp-EshUeimvZA,10474
+networkx/algorithms/flow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/flow/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-39.pyc,,
+networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-39.pyc,,
+networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-39.pyc,,
+networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-39.pyc,,
+networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-39.pyc,,
+networkx/algorithms/flow/tests/gl1.gpickle.bz2,sha256=z4-BzrXqruFiGqYLiS2D5ZamFz9vZRc1m2ef89qhsPg,44623
+networkx/algorithms/flow/tests/gw1.gpickle.bz2,sha256=b3nw6Q-kxR7HkWXxWWPh7YlHdXbga8qmeuYiwmBBGTE,42248
+networkx/algorithms/flow/tests/netgen-2.gpickle.bz2,sha256=OxfmbN7ajtuNHexyYmx38fZd1GdeP3bcL8T9hKoDjjA,18972
+networkx/algorithms/flow/tests/test_gomory_hu.py,sha256=aWtbI3AHofIK6LDJnmj9UH1QOfulXsi5NyB7bNyV2Vw,4471
+networkx/algorithms/flow/tests/test_maxflow.py,sha256=YRgkrdRj6NMHOXio2Zgr7-ErEzCbq7Z0w90azNffCC4,18727
+networkx/algorithms/flow/tests/test_maxflow_large_graph.py,sha256=fMweTQ3MzsZWYI-ul2dGR8OfGQeo8df2fLeCleHqxZw,4623
+networkx/algorithms/flow/tests/test_mincost.py,sha256=n4fFLDwDLy7Tau-_ey1CoxZwKhFjk28GLGJjCyxhClk,17816
+networkx/algorithms/flow/tests/test_networksimplex.py,sha256=bsVxlvHAD0K7aDevCcVaa9uRNNsWAevw6yUKlj2T8No,12103
+networkx/algorithms/flow/tests/wlm3.gpickle.bz2,sha256=zKy6Hg-_swvsNh8OSOyIyZnTR0_Npd35O9RErOF8-g4,88132
+networkx/algorithms/flow/utils.py,sha256=TyckjUeH5qcBUSARpkuZDXaVirYGuo9xvJK8cno0T38,6001
+networkx/algorithms/graph_hashing.py,sha256=cOAW2XlFvuYokbmhEeKiX4KQgIHL6PjraBqsZizcj_A,11887
+networkx/algorithms/graphical.py,sha256=BYh1nXb2Kg1AYLJBz926QIKcHkAtXqInpsb7QSzykAQ,15807
+networkx/algorithms/hierarchy.py,sha256=jDj8Ld7InknG7OVLVSnT82cqzNvBQ4FT5Qso107kQVQ,1541
+networkx/algorithms/hybrid.py,sha256=fFfA7Ki4zKxm9r8VEvreUqyCvGeCygsBbJZPsvXUQ7A,6180
+networkx/algorithms/isolate.py,sha256=toiuRPi4qb06D_hREZWbAcDJ4c8yx8aKftWez22Efj0,2325
+networkx/algorithms/isomorphism/__init__.py,sha256=gPRQ-_X6xN2lJZPQNw86IVj4NemGmbQYTejf5yJ32N4,406
+networkx/algorithms/isomorphism/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/ismags.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-39.pyc,,
+networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-39.pyc,,
+networkx/algorithms/isomorphism/ismags.py,sha256=5KRimh6jxs7BIDqAo48d01i8_1WkkckV0xVWkQ64czs,43529
+networkx/algorithms/isomorphism/isomorph.py,sha256=PQONDdw4Mc6neaPhW7yVQxoxOrbOqYorgBse4OjYtBA,7097
+networkx/algorithms/isomorphism/isomorphvf2.py,sha256=1LWpe54aulfYukTS87DoS4l1reCpOqZEr-74MOQLrRc,40528
+networkx/algorithms/isomorphism/matchhelpers.py,sha256=b7A7SwbqXj8CKAw-vrISgBNhDcEXobPjljkOhyWn4aM,10891
+networkx/algorithms/isomorphism/temporalisomorphvf2.py,sha256=-1NW81l8kM9orQ2ni9tcNizQzEhOUE9BaBJXjUWqhiI,10948
+networkx/algorithms/isomorphism/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-39.pyc,,
+networkx/algorithms/isomorphism/tests/iso_r01_s80.A99,sha256=hKzMtYLUR8Oqp9pmJR6RwG7qo31aNPZcnXy4KHDGhqU,1442
+networkx/algorithms/isomorphism/tests/iso_r01_s80.B99,sha256=AHx_W2xG4JEcz1xKoN5TwCHVE6-UO2PiMByynkd4TPE,1442
+networkx/algorithms/isomorphism/tests/si2_b06_m200.A99,sha256=NVnPFA52amNl3qM55G1V9eL9ZlP9NwugBlPf-zekTFU,310
+networkx/algorithms/isomorphism/tests/si2_b06_m200.B99,sha256=-clIDp05LFNRHA2BghhGTeyuXDqBBqA9XpEzpB7Ku7M,1602
+networkx/algorithms/isomorphism/tests/test_ismags.py,sha256=2sOkbB7Aejnq4zDx9BhJyfavf5DLiKJaUPusb3fhGRk,10585 +networkx/algorithms/isomorphism/tests/test_isomorphism.py,sha256=1GZmmqNWk605Qq9h55V_5SfEKPM50Ceq6DSICdh6ufs,1663 +networkx/algorithms/isomorphism/tests/test_isomorphvf2.py,sha256=s4yO4cHJk5qIpRemnSzD1MJEeSJPNpZcOU6LeWVhGXI,11751 +networkx/algorithms/isomorphism/tests/test_match_helpers.py,sha256=uuTcvjgf2LPqSQzzECPIh0dezw8-a1IN0u42u8TxwAw,2483 +networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py,sha256=DZy2zAt74jiTAM-jGK5H9aGRn1ZsMgQl9K5UNsu178Y,7346 +networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py,sha256=yj3C8ZBhi57I6kxetfneGpTse9hrYBvJQfGb0qks_G0,7066 +networkx/algorithms/isomorphism/tests/test_vf2pp.py,sha256=65RkN1mPWLoxirE7SlIvfaKMJk80b_ZwWG6HTJtlkPg,49924 +networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py,sha256=s4zz4IYYm2q8nHmkG0eRI2yJjcTx6zjRL7HTVIl1a-s,90080 +networkx/algorithms/isomorphism/tests/test_vf2userfunc.py,sha256=yby-vt4sYxc1uzlnD-iETREbojgNkpQGbLkrPER_Sss,6629 +networkx/algorithms/isomorphism/tree_isomorphism.py,sha256=HKPUDU1oCYCfEgyNYN0e31K9PqE3hsqHZMX8Iuq7i1Q,9397 +networkx/algorithms/isomorphism/vf2pp.py,sha256=oKBYHbwS0j3UihEI7LgaUZ7sMhv3nLLFrnCk3jETxnw,36383 +networkx/algorithms/isomorphism/vf2userfunc.py,sha256=VVTNWEzHnRaZrjtinBnkStRNsvC9FVvivXWs-pqG6LM,7475 +networkx/algorithms/link_analysis/__init__.py,sha256=UkcgTDdzsIu-jsJ4jBwP8sF2CsRPC1YcZZT-q5Wlj3I,118 +networkx/algorithms/link_analysis/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-39.pyc,, +networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-39.pyc,, +networkx/algorithms/link_analysis/hits_alg.py,sha256=5ntPDFZNGYbrw0Bq4WNvmoIWBxSa6PtIoBVHhHmv-8M,10244 +networkx/algorithms/link_analysis/pagerank_alg.py,sha256=0l7xABhW3Vkhx07y87NynTdYqcTql0UAVfCghURhZFk,17183 +networkx/algorithms/link_analysis/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-39.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-39.pyc,, +networkx/algorithms/link_analysis/tests/test_hits.py,sha256=BXMNyKv4OfRKXH9W8u8qCV3zaghDlEItRhYLN0TB-CM,2525 +networkx/algorithms/link_analysis/tests/test_pagerank.py,sha256=g0HyPn5HBXZeu-TQSWqqTzOfnzaejRfBuIIpKYSGecE,7530 +networkx/algorithms/link_prediction.py,sha256=z4abyN_TIEZiq6TN9YP7TYvrdiDhzgg-Irn9UW1IkYQ,19968 +networkx/algorithms/lowest_common_ancestors.py,sha256=popl_tFPaN5r4P1UQ47-qzEQ9EM0EQCeA7F_9th71Ek,9186 +networkx/algorithms/matching.py,sha256=ePjtahy-HIMke90HjmcgO1mJOUcG7WYJFfFKuAqQ8Jk,44530 +networkx/algorithms/minors/__init__.py,sha256=ceeKdsZ6U1H40ED-KmtVGkbADxeWMTVG07Ja8P7N_Pg,587 +networkx/algorithms/minors/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/minors/__pycache__/contraction.cpython-39.pyc,, +networkx/algorithms/minors/contraction.py,sha256=dS3lUcojiGydTV2IOrpyD3UxChG5ZYqHy74c-Euulns,22735 +networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-39.pyc,, +networkx/algorithms/minors/tests/test_contraction.py,sha256=rob7wHlt3xoXYxpcXQOwm7zP0TLyRqWV1JxsZlE8kfo,14212 +networkx/algorithms/mis.py,sha256=9ZdCuXXAlKOVHgWJMtillI1vLOXQGlRpSoz-rbnMU3I,2339 +networkx/algorithms/moral.py,sha256=k9uZGz0S6YK3U5hoiRLzR--PRQNktNdaI9-lrzsrgBg,1511 +networkx/algorithms/node_classification.py,sha256=ACq6C3i2p-f5E4UGHK8XQ6ng6ZVf6DxHdx4xR0x1zrA,6461 
+networkx/algorithms/non_randomness.py,sha256=wEXsl0fat8w0SAROXT_mB0B4fidVD8N5Ue5rLDMcu7Q,2893 +networkx/algorithms/operators/__init__.py,sha256=dJ3xOXvHxSzzM3-YcfvjGTJ_ndxULF1TybkIRzUS87Y,201 +networkx/algorithms/operators/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/all.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/binary.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/product.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/unary.cpython-39.pyc,, +networkx/algorithms/operators/all.py,sha256=_1em4J-Y6GQK0_UlkTIKZ7DNdTSzdXkZ748gAq9dxkg,9544 +networkx/algorithms/operators/binary.py,sha256=Un3NpZQxmGB3EhQY-_-qb-Kzr6lJy9RRVJZR0FOSrVI,12689 +networkx/algorithms/operators/product.py,sha256=3hm3Q1K3BaH6RYUTP369jGgFSVsyAF3crDS74PKlVDM,16115 +networkx/algorithms/operators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/operators/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_all.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_product.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-39.pyc,, +networkx/algorithms/operators/tests/test_all.py,sha256=Pqjv9QiA0875Yl9D5o6c5Ml0t4KHpH2a5jbpAoZQXFc,8250 +networkx/algorithms/operators/tests/test_binary.py,sha256=N_HEuvjUPneQK44rYo8AMhR7OdfQk76U9EqIXCt65X4,12795 +networkx/algorithms/operators/tests/test_product.py,sha256=hbnfR6gKXhl2BiEHKrgi4hMYIie95noooNmVBws1iLo,13402 +networkx/algorithms/operators/tests/test_unary.py,sha256=UZdzbt5GI9hnflEizUWXihGqBWmSFJDkzjwVv6wziQE,1415 +networkx/algorithms/operators/unary.py,sha256=9cLxWpgt7aleGAgL608nW99s1gaw5kVGa-LybPt--qY,1745 +networkx/algorithms/planar_drawing.py,sha256=q9QJYn3PhDHzfNO1t5xAStN1XDzTPybFxtorb60KF5k,16289 +networkx/algorithms/planarity.py,sha256=j1SbhjE620Jda4PHALJh6PIwu_GHJKwACHnB6_q02aA,39476 +networkx/algorithms/polynomials.py,sha256=WNcCyqedQTPhhiwp9TJRqZXuo4KGTPjb85GF86XvDcU,11270 +networkx/algorithms/reciprocity.py,sha256=vKwcggMOSOz29-_j0R3do52qosaYFbfJJBXOkW-5jH8,2846 +networkx/algorithms/regular.py,sha256=0zEftUGLYqxeUEAV2cerg6bda98fQuqrDyKkNdkvvpo,6680 +networkx/algorithms/richclub.py,sha256=EP7v7VT6GhNAqFQ15I-WeDdksFISBArkS-guAPpdJoo,4166 +networkx/algorithms/shortest_paths/__init__.py,sha256=Rmxtsje-mPdQyeYhE8TP2NId-iZEOu4eAsWhVRm2Xqk,285 +networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/astar.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/dense.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/generic.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/astar.py,sha256=mb9Z0nmHJhTCuF_tAvS3oiNrpplCNQReunAwrLJcYME,7674 +networkx/algorithms/shortest_paths/dense.py,sha256=xBzv4NHJ-J2ehGnDEgjHJ6a3LidSKYBy2p36qBTdgfI,8151 +networkx/algorithms/shortest_paths/generic.py,sha256=uN1-eUXz6n9R0WWwCgpiynJTfyYyp5RYgCUeroeizWo,25321 +networkx/algorithms/shortest_paths/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-39.pyc,, 
+networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/test_astar.py,sha256=X9tLO2OVDrHjSy1-nNaNFIRngnOo-ITT5HAj6PAXafk,7176 +networkx/algorithms/shortest_paths/tests/test_dense.py,sha256=ievl4gu3Exl_31hp4OKcsAGPb3g3_xFUM4t3NnvrG_A,6747 +networkx/algorithms/shortest_paths/tests/test_dense_numpy.py,sha256=BNwXCe2wgNPE8o35-shPsFj8l19c_QG6Ye8tkIGphf8,2300 +networkx/algorithms/shortest_paths/tests/test_generic.py,sha256=aR3pUbMS-s3vBZJg7kauoY6rmZbjlx-DweCC1wyZQI4,18156 +networkx/algorithms/shortest_paths/tests/test_unweighted.py,sha256=fjpDkp38DmW8R2qpLRwRjcbYZp4an0f0yIq40XsFKJ8,5899 +networkx/algorithms/shortest_paths/tests/test_weighted.py,sha256=dmzFBYN3QEDZoun7RAtSe_spsGSbvkDiJSgUf9e-1K8,35038 +networkx/algorithms/shortest_paths/unweighted.py,sha256=EuiZiHEQEOrFZpmUMm52ThANjyfWeWhKYFIrnvu9G_s,15494 +networkx/algorithms/shortest_paths/weighted.py,sha256=esTy6BYmWqBcCGN15Ld3jJCfjTNZtw4pyphEK4g2NqQ,82339 +networkx/algorithms/similarity.py,sha256=Y5GRl1NW-IFVqwogOdHJs-SAHmpv1SgfN7MIF7y8odQ,59062 +networkx/algorithms/simple_paths.py,sha256=cliV45VPZSKngEAEI6jQX5o8JD9G7Yhe_kFnCCvZP1k,30535 +networkx/algorithms/smallworld.py,sha256=sD1yv28XfqfwSi0Y88GOKXPzVwMJlWpNf8S14ZLOw_k,13494 +networkx/algorithms/smetric.py,sha256=jHdRFK7HyeFkhQWkKApa1MpAyi512pY4Ksulkh8joo8,1933 +networkx/algorithms/sparsifiers.py,sha256=InQAPhcRTI5O_l9ckBc2LAheONfBVx1Wn6yYY0NVOiA,10073 +networkx/algorithms/structuralholes.py,sha256=58c8f6hBwxhp5-GL1JPWnR5dRLZU3gNziddeFVIeXSE,9319 +networkx/algorithms/summarization.py,sha256=ymt635-uEH_uocXwF0_tU8EbGlW_1wf3ZM-v0CG7dH4,23251 +networkx/algorithms/swap.py,sha256=uYRzbxhEONmGQlWWDsDw_SJQgIWJfXzvSKYQAd50HfE,14579 +networkx/algorithms/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_boundary.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_bridges.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_chains.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_chordal.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_clique.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_cluster.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_communicability.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_core.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_covering.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_cuts.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_cycles.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_d_separation.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_dag.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_dominance.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_dominating.cpython-39.pyc,, 
+networkx/algorithms/tests/__pycache__/test_efficiency.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_euler.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_graphical.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_hybrid.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_isolate.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_matching.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_mis.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_moral.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_node_classification.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_planarity.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_polynomials.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_regular.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_richclub.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_similarity.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_smallworld.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_smetric.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_summarization.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_swap.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_threshold.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_tournament.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_triads.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_vitality.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_voronoi.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_walks.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_wiener.cpython-39.pyc,, +networkx/algorithms/tests/test_asteroidal.py,sha256=DnWI5_jnaaZMxtG44XD0K690HZs8ez7HU_9dSR-p6eA,502 +networkx/algorithms/tests/test_boundary.py,sha256=1OSJh32FYFhAVYB5zqxhZGEXZLS0HPp9kvfHZvWmD3o,6227 +networkx/algorithms/tests/test_bridges.py,sha256=FS34gA5cia8di_a2X4meeB7qI0JrsVtpQlL4fe_i1CA,4027 +networkx/algorithms/tests/test_chains.py,sha256=SofaAxDEJDf1gt5sIGVC_O8vT9YcTc8Jq1vfnwVPhkM,4363 +networkx/algorithms/tests/test_chordal.py,sha256=DPdNPY7KtqCsCwYVb4xQfnIm-z35dUJIWxNHtAiQLAQ,4438 +networkx/algorithms/tests/test_clique.py,sha256=FPIF2f8NLODsz-k_qrHt7DolClV_VdNWSh68oe8-ygI,9413 +networkx/algorithms/tests/test_cluster.py,sha256=AltwLWAblpSLa-24KvNuxYxM2IeVl5p2d-kozA9QJ-0,15595 +networkx/algorithms/tests/test_communicability.py,sha256=4KK9wU9gAUqHAAAyHwAKpq2dV9g415s_X0qd7Tt83gU,2938 
+networkx/algorithms/tests/test_core.py,sha256=ZmLePvuK-Tv8aQ6tGCJd9965BHKUviNNVV7o3PzwfEE,7016 +networkx/algorithms/tests/test_covering.py,sha256=EeBjQ5mxcVctgavqXZ255T8ryFocuxjxdVpIxVUNFvw,2718 +networkx/algorithms/tests/test_cuts.py,sha256=2Ir5xyIG4cTC4Dgg1cceLXaEFiOCJ60ZTDDn33vz0Ns,5377 +networkx/algorithms/tests/test_cycles.py,sha256=mrID4F3wdoZV1oBPETd8Ebx9UXasC2dWsccx8bq_5C8,34243 +networkx/algorithms/tests/test_d_separation.py,sha256=md90cCjC409qAolNGTFwGTu5N577GdgK2RYW83lxopk,6600 +networkx/algorithms/tests/test_dag.py,sha256=MMSD9Flgl_h2fCDIr7gPr9MACj-cU6Xnn5qK1-LeCSc,27722 +networkx/algorithms/tests/test_distance_measures.py,sha256=LdVbsbebvMZghK2gOgecNGxs7v_WjO8z2okG9uF8rAY,22327 +networkx/algorithms/tests/test_distance_regular.py,sha256=pPZ2CPKo4QLjhxlcJhBQZif6-_2qwfh1kpbrN_mu5tg,2312 +networkx/algorithms/tests/test_dominance.py,sha256=nPqRGSF1GEvUR16ryo-dOql6fLdTvzBmYk8Y3ML-ONc,9373 +networkx/algorithms/tests/test_dominating.py,sha256=hyta7ln6BbHaGlpEUla6jVzh2PRuSjvujLSGXrmwZbc,1228 +networkx/algorithms/tests/test_efficiency.py,sha256=QKWMvyjCG1Byt-oNp7Rz_qxnVeT77Zk27lrzI1qH0mA,1894 +networkx/algorithms/tests/test_euler.py,sha256=4ajCsO3PwKBaz8jTB_b_nHh_yOz9qSPTOhNBloIRAF8,10987 +networkx/algorithms/tests/test_graph_hashing.py,sha256=duR9DQLUpRuy9bv0ZKQPt9gy9WxiX_K0-BVMlnF-WHY,23517 +networkx/algorithms/tests/test_graphical.py,sha256=uhFjvs04odxABToY4IRig_CaUTpAC3SfZRu1p1T7FwY,5366 +networkx/algorithms/tests/test_hierarchy.py,sha256=g3-0pNfzRo-RDW1BsiLXxyi2LwWIJukXx2i4JCpN2fg,941 +networkx/algorithms/tests/test_hybrid.py,sha256=kQLzaMoqZcKFaJ3D7PKbY2O-FX59XDZ1pN5un8My-tk,720 +networkx/algorithms/tests/test_isolate.py,sha256=LyR0YYHJDH5vppQzGzGiJK-aaIV17_Jmla8dMf93olg,555 +networkx/algorithms/tests/test_link_prediction.py,sha256=7c322xESYdH5WEA0TsMw4Jcc_-lqfIsj-SjXP6Y0TVc,19442 +networkx/algorithms/tests/test_lowest_common_ancestors.py,sha256=GvhYCQMnVYD9LHPCNFgWMAUmOV8V5gko0fe05zi1JwU,13153 +networkx/algorithms/tests/test_matching.py,sha256=jhehNkApE5RuMPtbjWNeHn0tPqhVz65mL7QakfRA3Vw,20174 +networkx/algorithms/tests/test_max_weight_clique.py,sha256=JWGZpbQfUaCklCGI170Gfpp3b5ICYwY7RH_DQ1mYQbc,6741 +networkx/algorithms/tests/test_mis.py,sha256=jusLniyKcNWs0994srLJxY3SVeAQqkkXf-h-qtlrfGw,1875 +networkx/algorithms/tests/test_moral.py,sha256=15PZgkx7O9aXQB1npQ2JNqBBkEqPPP2RfeZzKqY-GNU,452 +networkx/algorithms/tests/test_node_classification.py,sha256=NgJJKUHH1GoD1GE3F4QRYBLM3fUo_En3RNtZvhqCjlg,4663 +networkx/algorithms/tests/test_non_randomness.py,sha256=-8s-fJLYRxVNp7QpaMe5Dxrxi0kvewY78d4ja-nXNBk,782 +networkx/algorithms/tests/test_planar_drawing.py,sha256=CBJv6U9tT0BzYVrmEBlARBZSMxBwTsX3krACAnAPfHg,8771 +networkx/algorithms/tests/test_planarity.py,sha256=h9kUOsn0skbvYBcIYzKy5XDGmyP3sTwtvoXYKr_X230,13148 +networkx/algorithms/tests/test_polynomials.py,sha256=baI0Kua1pRngRC6Scm5gRRwi1bl0iET5_Xxo3AZTP3A,1983 +networkx/algorithms/tests/test_reciprocity.py,sha256=X_PXWFOTzuEcyMWpRdwEJfm8lJOfNE_1rb9AAybf4is,1296 +networkx/algorithms/tests/test_regular.py,sha256=zGf7Mmh7XPtwunOoeTfgiICnfsVeCEbMop3NrDgIfqY,2457 +networkx/algorithms/tests/test_richclub.py,sha256=hhRGQGNQ2EINvmTF-XkJxGZXROvQJZuWwubCYq8Mx9U,2585 +networkx/algorithms/tests/test_similarity.py,sha256=JJYVUV-WtjswW-kDbY5tUuyjLI_3mVKOLUbaRz8wCM8,32216 +networkx/algorithms/tests/test_simple_paths.py,sha256=mmuKfi8t9iXLO8tSIuQGbupFe9c6X6cSiGKEWYiWiqM,24075 +networkx/algorithms/tests/test_smallworld.py,sha256=rfgNCRU6YF55f8sCuA5WmX6MmhDci89Tb4jaz4ALjcQ,2405 
+networkx/algorithms/tests/test_smetric.py,sha256=wihpgjZS4PaajOuE72RiDEbBWpQcoKPSAfjoAezuRxg,980 +networkx/algorithms/tests/test_sparsifiers.py,sha256=A12V4ljWxvXaSFJ73mHSFK2YNO-k8ax6Me4yEWTsI4s,4043 +networkx/algorithms/tests/test_structuralholes.py,sha256=-48vhIVXcUlmLAi603FdBP6afbVu447JZ1piSCIpRTE,5536 +networkx/algorithms/tests/test_summarization.py,sha256=cGAep6r-v141uAdsPF9r8YTuT-nO7L7puOqPPv339wo,21313 +networkx/algorithms/tests/test_swap.py,sha256=YRpN79MNL1i5Hm2FVb-mNl9SRfHDWAuDnn2Wx95_UYY,5307 +networkx/algorithms/tests/test_threshold.py,sha256=RF_SM5tdMGJfEHETO19mFicnt69UIlvVeuCwI7rxb0M,9751 +networkx/algorithms/tests/test_time_dependent.py,sha256=NmuV2kDo4nh2MeN0hwcJf0QSDtqMD0dfSeeKSsYBtQ8,13342 +networkx/algorithms/tests/test_tournament.py,sha256=xxmLb9Lrmjkh9tKmyv2yYJrhB2PHWh-Bq71M-d1NjQo,4158 +networkx/algorithms/tests/test_triads.py,sha256=tPMzSQDVHZQOmDOKa9Hyem76UO1zh7wcdaM9X_BhxG4,9088 +networkx/algorithms/tests/test_vitality.py,sha256=p5lPWCtVMtbvxDw6TJUaf8vpb0zKPoz5pND722xiypQ,1380 +networkx/algorithms/tests/test_voronoi.py,sha256=M4B6JtkJUw56ULEWRs1kyVEUsroNrnb5FBq9OioAyHM,3477 +networkx/algorithms/tests/test_walks.py,sha256=X8cb-YvGHiiqbMEXuKMSdTAb9WtVtbHjIESNSqpJTmU,1499 +networkx/algorithms/tests/test_wiener.py,sha256=NJJbXZ9L5ZeFGQpCpvYVWFNqyX3amkbuDQEBL7wCixw,2080 +networkx/algorithms/threshold.py,sha256=1GUMnQQvN_6mR5oowUbgoykaWPyV4zO1mO1Dyb9NdAE,31088 +networkx/algorithms/time_dependent.py,sha256=73WgWETl4IP4qsOrSDkIEIU7MJM3qc_j-LKRZUJlC4c,5757 +networkx/algorithms/tournament.py,sha256=nTgdIzkkFhIyie_5jDh97USz2jQx9TgY34vHOPVXkN0,11676 +networkx/algorithms/traversal/__init__.py,sha256=YtFrfNjciqTOI6jGePQaJ01tRSEQXTHqTGGNhDEDb_8,142 +networkx/algorithms/traversal/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/beamsearch.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/edgebfs.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/edgedfs.cpython-39.pyc,, +networkx/algorithms/traversal/beamsearch.py,sha256=ABR8pOl4G3CbUyaHpR9F93jh3J0XjVKZe7DK5LetVWc,3424 +networkx/algorithms/traversal/breadth_first_search.py,sha256=2sIfMwqc2qjGW2W3UcGwGBSCcc-_FWWa_ZRYQjhfro8,18107 +networkx/algorithms/traversal/depth_first_search.py,sha256=aJ-3wtaLVLslz5dJD7nW3biDdXsjJi_rAJB8QQTg_8w,13730 +networkx/algorithms/traversal/edgebfs.py,sha256=2eUhaoP2__-QSkDu1ie6wPd1WjuK3u8pU8rf87iAtKc,6239 +networkx/algorithms/traversal/edgedfs.py,sha256=NaZfoYV5jl8mmo5UhgdQqM2YJ_PjZ--VKYF5T9qa6Ms,5952 +networkx/algorithms/traversal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/test_beamsearch.py,sha256=b1fXCI0_BuWbnA536PZrXMMUfG1ejnHX1fpQGY-5hqI,1076 +networkx/algorithms/traversal/tests/test_bfs.py,sha256=fC6HUKzd5Jd9LerxgODpfvCRE15BU5PbMzEaMLoXPZs,6796 +networkx/algorithms/traversal/tests/test_dfs.py,sha256=4Gc1ACJQJ63rfOlPz0X0Tv6xW6k83ewMRVojBEnKMmk,8616 
+networkx/algorithms/traversal/tests/test_edgebfs.py,sha256=8oplCu0fct3QipT0JB0-292EA2aOm8zWlMkPedfe6iY,4702 +networkx/algorithms/traversal/tests/test_edgedfs.py,sha256=HGmC3GUYSn9XLMHQpdefdE6g-Uh3KqbmgEEXBcckdYc,4775 +networkx/algorithms/tree/__init__.py,sha256=wm_FjX3G7hqJfyNmeEaJsRjZI-8Kkv0Nb5jAmQNXzSc,149 +networkx/algorithms/tree/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/branchings.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/coding.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/decomposition.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/mst.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/operations.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/recognition.cpython-39.pyc,, +networkx/algorithms/tree/branchings.py,sha256=ZJmoLcn_rtQ9VKBwsFqdx1Rkh8edqjNGgYWBUezNpvE,56003 +networkx/algorithms/tree/coding.py,sha256=zB5ISLd1Jn-6wWfqrQaxYa2hbwchp59qzGKmCiuLVd8,13407 +networkx/algorithms/tree/decomposition.py,sha256=MFV3zHYOt8y7n3jNBxQCQvO60IFJ9rl5XIgvBqjT5RQ,3047 +networkx/algorithms/tree/mst.py,sha256=STCiAXhYmbt3x0yaqRQehwlp_GMJFoeU9vwdmzjFHNk,40276 +networkx/algorithms/tree/operations.py,sha256=46nnbX2qF_iyzeuzsIJXrlXiJOEQfo2KguFB4rP5Ttg,4702 +networkx/algorithms/tree/recognition.py,sha256=ZOdFP-cdG2Lv7jiX3M2nPi7g-e327wPN36_xbn4KiDs,7553 +networkx/algorithms/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tree/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-39.pyc,, +networkx/algorithms/tree/tests/test_branchings.py,sha256=chEEI0evEkVVJSphcP_kIqhMEdv0nhBONYGP8ffW40s,18008 +networkx/algorithms/tree/tests/test_coding.py,sha256=f3A5dvfkWImC6Jp2qkuw2Sz3whOsabnaOfu6Eh9r65I,3954 +networkx/algorithms/tree/tests/test_decomposition.py,sha256=vnl_xoQzi1LnlZL25vXOZWwvaWmon3-x222OKt4eDqE,1871 +networkx/algorithms/tree/tests/test_mst.py,sha256=NgvEi2kwn18HN8ywvj1V20pZS98JIuo9vJOv31DqW2w,24749 +networkx/algorithms/tree/tests/test_operations.py,sha256=ybU96kROTVJRTyjLG7JSJjYlPxaWmYjUVJqbXV5VGGI,1961 +networkx/algorithms/tree/tests/test_recognition.py,sha256=hbS6q1lbshRClWH7o8Zj7Osd-TZuk_YOomUdTczHs3s,4171 +networkx/algorithms/triads.py,sha256=4EF74EolTV1PJheA-J371tP_pTJA7-aGLnq4D8pac2E,15517 +networkx/algorithms/vitality.py,sha256=yBXiKzewpz40SK0UzFpeZZDvOXXDSt_vh4jGLrNgnHg,2331 +networkx/algorithms/voronoi.py,sha256=zCiFFQiklBS5afAEfln44GfiPsoAI9c7E6m0gbQ2Big,3178 +networkx/algorithms/walks.py,sha256=3ulDmITV-YN8yBTr2qmVEDsSe-Gi8yzrnGyLP5dXQbQ,2419 +networkx/algorithms/wiener.py,sha256=o8tYpFn-W9sneu3QMhpMZlCkqptgRo2ZdUFKfVCOL1s,2328 +networkx/classes/__init__.py,sha256=Q9oONJrnTFs874SGpwcbV_kyJTDcrLI69GFt99MiE6I,364 +networkx/classes/__pycache__/__init__.cpython-39.pyc,, +networkx/classes/__pycache__/coreviews.cpython-39.pyc,, +networkx/classes/__pycache__/digraph.cpython-39.pyc,, +networkx/classes/__pycache__/filters.cpython-39.pyc,, +networkx/classes/__pycache__/function.cpython-39.pyc,, +networkx/classes/__pycache__/graph.cpython-39.pyc,, +networkx/classes/__pycache__/graphviews.cpython-39.pyc,, 
+networkx/classes/__pycache__/multidigraph.cpython-39.pyc,, +networkx/classes/__pycache__/multigraph.cpython-39.pyc,, +networkx/classes/__pycache__/reportviews.cpython-39.pyc,, +networkx/classes/coreviews.py,sha256=jkbsDaqebCcFH952hAAAuXw3qZpi7xUdCHyKbUrFsc8,11010 +networkx/classes/digraph.py,sha256=CnnSfxWTjOkabNkSNhBfE2bgwgSG32ylDD3scWxaPP0,47159 +networkx/classes/filters.py,sha256=47OFApfkvvohVMoZ2v9sniM6sgv9rka869BDwmbdww4,1715 +networkx/classes/function.py,sha256=55dS0xS5p7oiyeJQ03mGcSo7NwJLbfZ54_kIhVOx-q0,36323 +networkx/classes/graph.py,sha256=DigVc4mmBx9v1ctgqhDYu8GZwT2wa4iyci85otpYZ6Q,70379 +networkx/classes/graphviews.py,sha256=7rSoE4Pkh8SjjsP2G6t0U0SRAwnkUp5QLSgnJPZgPUQ,8558 +networkx/classes/multidigraph.py,sha256=_5yJvVz99QMkp92iS2qONcahEBhsmS3C7bfaKrNNKoA,36283 +networkx/classes/multigraph.py,sha256=5X1_tB0LJgfif8f1HckiL1MTm9tPDJ4YTRe_H22WFbA,47127 +networkx/classes/reportviews.py,sha256=WQU6LBq2tXIohU6bDJALKeefFaeGMW46Ln8sUJoJ-yM,45606 +networkx/classes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/classes/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/classes/tests/__pycache__/dispatch_interface.cpython-39.pyc,, +networkx/classes/tests/__pycache__/historical_tests.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_backends.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_coreviews.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_digraph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_digraph_historical.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_filters.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_function.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_graph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_graph_historical.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_graphviews.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_multidigraph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_multigraph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_reportviews.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_special.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_subgraphviews.cpython-39.pyc,, +networkx/classes/tests/dispatch_interface.py,sha256=bOQdru35uGmAMKHNZEok-UBAeiuA0rF20JZEeo4ePT4,6683 +networkx/classes/tests/historical_tests.py,sha256=3lbZKaRvv8uodIEzSbBJDguTPpO2MhqBqh-Pk1soZBM,16173 +networkx/classes/tests/test_backends.py,sha256=IR8qFYv4cJX0y1q48lRV0zJEouj1sfSOmcwcm4FBxEg,2579 +networkx/classes/tests/test_coreviews.py,sha256=qzdozzWK8vLag-CAUqrXAM2CZZwMFN5vMu6Tdrwdf-E,12128 +networkx/classes/tests/test_digraph.py,sha256=uw0FuEu3y_YI-PSGuQCRytFpXLF7Eye2fqLJaKbXkBc,12283 +networkx/classes/tests/test_digraph_historical.py,sha256=s9FpuIP81zIbGCiMfiDqB3OxqWU2p3GwWdhpGIOjD5Y,3683 +networkx/classes/tests/test_filters.py,sha256=fBLig8z548gsBBlQw6VJdGZb4IcqJj7_0mi2Fd2ncEM,5851 +networkx/classes/tests/test_function.py,sha256=e5vg_SjtC8nHrMDemCcEVTHcudcSiWToMsZF655eQi4,25770 +networkx/classes/tests/test_graph.py,sha256=77t7pk1Pmz-txewyD2Dv19Vva6vWpWCtJSPtFx-EY_Y,30913 +networkx/classes/tests/test_graph_historical.py,sha256=-jf961vQCuQLyly0ju50q9dbzWG5m2OAs9H6IVS670c,273 +networkx/classes/tests/test_graphviews.py,sha256=i4x3ii8--PPg_pK4YA8aMR1axUQCdXZYpzmB05iEAOg,11466 +networkx/classes/tests/test_multidigraph.py,sha256=ryTKegCoYixXbAqOn3mIt9vSMb5666Dv-pfMkXEjoUE,16342 +networkx/classes/tests/test_multigraph.py,sha256=0vFQO3RCJaBpzXvnQzdWa_qYLHNo_I9DICYhPZJNUMk,18777 
+networkx/classes/tests/test_reportviews.py,sha256=-4Vd42cOvTdZfsPiWuQuAAvVDafosjB47RosYglmXUw,41470 +networkx/classes/tests/test_special.py,sha256=IJsmqCS9LrTDoZ11KPmo-UOI7xEskL7NyduEJNPMNqs,4103 +networkx/classes/tests/test_subgraphviews.py,sha256=1dcJHq3F00LyoFSu6CTFPqS7DFIkWK1PyQu4QvJh5ko,13223 +networkx/conftest.py,sha256=v1f_RONRmJwf__rPjF4l-uFl2cyyF08mtst8WTnHXr4,7944 +networkx/convert.py,sha256=0EbsMbOgm8pdKpW4A6p5Ea4T_KGEcq-P1a0gCnM-jzY,15977 +networkx/convert_matrix.py,sha256=cnI56RdwsBke_XOz6dAbfgj_qVIwU2JeirRqMN9tM2w,41069 +networkx/drawing/__init__.py,sha256=rnTFNzLc4fis1hTAEpnWTC80neAR88-llVQ-LObN-i4,160 +networkx/drawing/__pycache__/__init__.cpython-39.pyc,, +networkx/drawing/__pycache__/layout.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_agraph.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_latex.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_pydot.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_pylab.cpython-39.pyc,, +networkx/drawing/layout.py,sha256=fvfHjubEdZZoRexknTKF2j0zO_Xj2N1nhzfuNTKY5BQ,38829 +networkx/drawing/nx_agraph.py,sha256=9Q6bz0oT7u-iU2hDIjhjvQ3jmxWsyAHA5fMIr4BmhEE,14009 +networkx/drawing/nx_latex.py,sha256=EZWQ1GJ9SWS7ufyAz8ey30gG1EtnCUVMgbLJ4-tIjgY,24805 +networkx/drawing/nx_pydot.py,sha256=vS_lJC9ASmBPyXpecinbNs_OuhjgUNIj82CLks3SjPk,14135 +networkx/drawing/nx_pylab.py,sha256=LOTf6wOfOkB_t-ml5EY-6SEOfMYAxdDzE-4uGJAxBuo,51138 +networkx/drawing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/drawing/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_agraph.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_latex.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_layout.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_pydot.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_pylab.cpython-39.pyc,, +networkx/drawing/tests/baseline/test_house_with_colors.png,sha256=FQi9pIRFwjq4gvgB8cDdBHL5euQUJFw6sQlABf2kRVo,21918 +networkx/drawing/tests/test_agraph.py,sha256=7qDwr3AruwHxoSUGNRACyL5OTK7_2qDM5bkCANSMql4,9045 +networkx/drawing/tests/test_latex.py,sha256=_Wng73kMltC-_sUoxdo2uBL2bkEc7HMqkKhwo9ZDJGA,8710 +networkx/drawing/tests/test_layout.py,sha256=JUHMitAFs28rgYcEvk3-8-8Ri0Qui9ir3DjY24L6MfU,17841 +networkx/drawing/tests/test_pydot.py,sha256=-eRn39-HTFAZ1oVMR6ULSZxCS2531-HrlNjQaUa3i-8,6242 +networkx/drawing/tests/test_pylab.py,sha256=hlStKEitfl74u2wnEYlajmWHVd0IwHdyFfxDcgooUfc,27576 +networkx/exception.py,sha256=5v8tPTpYcuu3OFgSitgC8-wMUGNwfgxZog2gsBNeRPk,3537 +networkx/generators/__init__.py,sha256=tEbG2IO2NkxVzAFjeApCpATxhjRYopOXes6iffqC6DI,1318 +networkx/generators/__pycache__/__init__.cpython-39.pyc,, +networkx/generators/__pycache__/atlas.cpython-39.pyc,, +networkx/generators/__pycache__/classic.cpython-39.pyc,, +networkx/generators/__pycache__/cographs.cpython-39.pyc,, +networkx/generators/__pycache__/community.cpython-39.pyc,, +networkx/generators/__pycache__/degree_seq.cpython-39.pyc,, +networkx/generators/__pycache__/directed.cpython-39.pyc,, +networkx/generators/__pycache__/duplication.cpython-39.pyc,, +networkx/generators/__pycache__/ego.cpython-39.pyc,, +networkx/generators/__pycache__/expanders.cpython-39.pyc,, +networkx/generators/__pycache__/geometric.cpython-39.pyc,, +networkx/generators/__pycache__/harary_graph.cpython-39.pyc,, +networkx/generators/__pycache__/internet_as_graphs.cpython-39.pyc,, +networkx/generators/__pycache__/intersection.cpython-39.pyc,, +networkx/generators/__pycache__/interval_graph.cpython-39.pyc,, 
+networkx/generators/__pycache__/joint_degree_seq.cpython-39.pyc,, +networkx/generators/__pycache__/lattice.cpython-39.pyc,, +networkx/generators/__pycache__/line.cpython-39.pyc,, +networkx/generators/__pycache__/mycielski.cpython-39.pyc,, +networkx/generators/__pycache__/nonisomorphic_trees.cpython-39.pyc,, +networkx/generators/__pycache__/random_clustered.cpython-39.pyc,, +networkx/generators/__pycache__/random_graphs.cpython-39.pyc,, +networkx/generators/__pycache__/small.cpython-39.pyc,, +networkx/generators/__pycache__/social.cpython-39.pyc,, +networkx/generators/__pycache__/spectral_graph_forge.cpython-39.pyc,, +networkx/generators/__pycache__/stochastic.cpython-39.pyc,, +networkx/generators/__pycache__/sudoku.cpython-39.pyc,, +networkx/generators/__pycache__/time_series.cpython-39.pyc,, +networkx/generators/__pycache__/trees.cpython-39.pyc,, +networkx/generators/__pycache__/triads.cpython-39.pyc,, +networkx/generators/atlas.dat.gz,sha256=c_xBbfAWSSNgd1HLdZ9K6B3rX2VQvyW-Wcht47dH5B0,8887 +networkx/generators/atlas.py,sha256=NG5jMwud76LrkzBCl9tNGrv0j3weSi4-A_wtPChdglY,5557 +networkx/generators/classic.py,sha256=hmHiIsSsld_DVY8lpULBF8bGRLYDnhCWDD-Y_8u4o-k,28395 +networkx/generators/cographs.py,sha256=oDJVZRiviNZOrHSGXxQNcXFbRvtB3DW0aFFEJ8bfQ_Y,1866 +networkx/generators/community.py,sha256=bgbD7UfKYXf9lSUe9WJeo20pioNJJMyh1ZpenYOex6E,34690 +networkx/generators/degree_seq.py,sha256=0Zpv4q5rYgrUepprOGaQFTw3PVg6EEWobo9i3Ira9ZI,30006 +networkx/generators/directed.py,sha256=qlm-ArjKbqnGWJi2Ax9HuqaUQm_IUFPuwwDYvAUW4o8,15554 +networkx/generators/duplication.py,sha256=1Ys4nb9suq49ooAVXSnkuKQjvW3H_llprd-xdp70wSs,5013 +networkx/generators/ego.py,sha256=V373eWi-qyJXxgsa49NdLyVqFVCpqC-CMY9ATwQYws8,1873 +networkx/generators/expanders.py,sha256=xDfzdnl2XYrVItnCiU_09szMKQB6S9xOaxWx6rFTHCE,6447 +networkx/generators/geometric.py,sha256=8slijaPVv36oTzU5EsKx0bqtLmgICpbV5zl0vh7MfM8,30858 +networkx/generators/harary_graph.py,sha256=-9SU_IDZklbmiz4bwyCOPbQ4DEnXHwKStim2fHbPz6A,6111 +networkx/generators/internet_as_graphs.py,sha256=tMXL8U9YCAerfYYOV0wpEjPL16HIAJpEOzL8GdZAjxw,14148 +networkx/generators/intersection.py,sha256=WW3yE7TbjDtjggrLU8UysmiY_ip6tLy_hggUYWXVRy8,4028 +networkx/generators/interval_graph.py,sha256=LuXcLfLiRToVhmwz6nz1yWKivzqujFOB1MNZ0WRgnmg,2213 +networkx/generators/joint_degree_seq.py,sha256=p49dvZCcda6NAEsz4zvbiogIw4deqT51HpgIb8rU2zY,24717 +networkx/generators/lattice.py,sha256=8yw3GgxEH2erK2xBCW3oMT2hT2R5eCJC9OcBKz-R-vs,13380 +networkx/generators/line.py,sha256=R5Nz58zz_Fq8LbbLPEDRrYbCGLlbaRj2htmRZJbEv3M,17500 +networkx/generators/mycielski.py,sha256=0f_XLLjdpKsV_VS31PESe6LGg4yJI7nyX4DUWtyMU0U,3266 +networkx/generators/nonisomorphic_trees.py,sha256=I6T3QaH-Asw1B5g0_jnQJO9tOydc4qEbSmNIFUvq6ik,5232 +networkx/generators/random_clustered.py,sha256=4msP74PFIQvmWCxIs7F9WqYWvOTqDXXSf1wSuExEXF8,4159 +networkx/generators/random_graphs.py,sha256=SSYM8SZWgRglwMI6vxBWukDnQDJEk_9lCFKWMm_NkjA,44729 +networkx/generators/small.py,sha256=ADc-LfX8aEg_UYkPVfZk9YN4Na2KxpZxHsSvnVhj5D8,27217 +networkx/generators/social.py,sha256=caDTR23cNv1nlA5j3OZvs3d4HuwExxNbjfO9Ijqv7qY,22867 +networkx/generators/spectral_graph_forge.py,sha256=PPU6w9Z0_6iKQODsXSCE4ftVMYNrECBeH1Q_Y5Ea95U,4217 +networkx/generators/stochastic.py,sha256=K_4B3xmc6EDTQkc53eaOK1wHzF6qijAogWmEMDIjwjc,1897 +networkx/generators/sudoku.py,sha256=l-j2mo0KLarWRvMmjncZZfxJzoYn9ZGuz_3rlGZ5CRM,4264 +networkx/generators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/generators/tests/__pycache__/__init__.cpython-39.pyc,, 
+networkx/generators/tests/__pycache__/test_atlas.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_classic.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_cographs.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_community.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_degree_seq.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_directed.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_duplication.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_ego.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_expanders.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_geometric.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_harary_graph.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_intersection.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_interval_graph.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_lattice.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_line.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_mycielski.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_random_clustered.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_random_graphs.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_small.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_stochastic.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_sudoku.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_time_series.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_trees.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_triads.cpython-39.pyc,, +networkx/generators/tests/test_atlas.py,sha256=nwXJL4O5jUqhTwqhkPxHY8s3KXHQTDEdsfbg4MsSzVQ,2530 +networkx/generators/tests/test_classic.py,sha256=RAELDkMqAVYaIo1nhBbjDbmAfzSrn6RBqSSd--mWqvs,22495 +networkx/generators/tests/test_cographs.py,sha256=DkiQzP69sjw3QtjWVX2XV0EXoOuEvR42dixPWwuawSE,460 +networkx/generators/tests/test_community.py,sha256=FGcDo3Ajb-yYc5kUkFbVfOJVMG-YppbAtjgBPcVzjLc,11311 +networkx/generators/tests/test_degree_seq.py,sha256=in6lg1pwcAg1N08MA3lQdr3lnm2-aoUy3BRm6Yj_OBQ,7093 +networkx/generators/tests/test_directed.py,sha256=00widU8dJGkdnU_b6-ZxL8KGtx-gSh4sRG7cwbMHvjQ,5258 +networkx/generators/tests/test_duplication.py,sha256=IIzcHEfHp0NHsH7GTXSb4E4kgXAlt83q4IMibfx2FBw,1915 +networkx/generators/tests/test_ego.py,sha256=8v1Qjmkli9wIhhUuqzgqCzysr0C1Z2C3oJMCUoNvgY4,1327 +networkx/generators/tests/test_expanders.py,sha256=O6O68S5VFWycg-Ml-gvZXjX_vjwO0MBfaBcNYIBP9Io,2896 +networkx/generators/tests/test_geometric.py,sha256=1wzo-eTOP937aOEk8lQGrsW7u2BDfRhyYQ1HpHQqslQ,12512 +networkx/generators/tests/test_harary_graph.py,sha256=U5GfsoekBwVwTGMvk33e2eFOzHEL4czRIWv57j3nt_g,4937 +networkx/generators/tests/test_internet_as_graphs.py,sha256=QmzkOnWg9bcSrv31UcaD6Cko55AV-GPLLY5Aqb_Dmvs,6795 +networkx/generators/tests/test_intersection.py,sha256=hcIit5fKfOn3VjMhz9KqovZK9tzxZfmC6ezvA7gZAvM,819 +networkx/generators/tests/test_interval_graph.py,sha256=-1yXDZDW-ygmNva9Bu-TsS_SYGLcW1KJplwZHFFYyWM,4278 +networkx/generators/tests/test_joint_degree_seq.py,sha256=8TXTZI3Um2gBXtP-4yhGKf9vCi78-NVmWZw9r9WG3F8,4270 
+networkx/generators/tests/test_lattice.py,sha256=q4Ri-dH9mKhfq0PNX9xMeYRUiP0JlPBr7piSruZlFlg,9290 +networkx/generators/tests/test_line.py,sha256=vXncJuny2j5ulCJyT01Rt1tTwPib4XelS3dJDdJXjx0,10378 +networkx/generators/tests/test_mycielski.py,sha256=cAg2J6o_RrbwEdAc0vCuSF6zeS6w1KT4leTM0vkIeoA,822 +networkx/generators/tests/test_nonisomorphic_trees.py,sha256=Y_qWyj_qZU9O_DC4BHEVD9xnIEALCmfdmZAYJjTxUYE,2384 +networkx/generators/tests/test_random_clustered.py,sha256=LTfigb1swnYWS59OJoBmNcjFcUjsodnHVOwFxBXl7xg,979 +networkx/generators/tests/test_random_graphs.py,sha256=DKEPbvKiFzZQsuofuj_MphGX2KJ8Bvz6ofIttDGMANk,13121 +networkx/generators/tests/test_small.py,sha256=u_CTdGXfwnqvIYWjYv8VX_r_KB5Y1aCxXxQkxhx-WHs,6906 +networkx/generators/tests/test_spectral_graph_forge.py,sha256=x4jyTiQiydaUPWYaGsNFsIB47PAzSSwQYCNXGa2B4SU,1594 +networkx/generators/tests/test_stochastic.py,sha256=xdytPcz4ETnuqGtjMr0CI3zR4xWJqi91Zxbkly8Ijf8,2178 +networkx/generators/tests/test_sudoku.py,sha256=dgOmk-B7MxCVkbHdZzsLZppQ61FAArVy4McSVL8Afzo,1968 +networkx/generators/tests/test_time_series.py,sha256=74kHpcBfbed7zmd1Ofh2XoLIhIaEEFpEf51j1e2muMo,2229 +networkx/generators/tests/test_trees.py,sha256=hv8oNYZOcYcaARXvaMQZptCVBvk-huk-nKI5mH9sB-8,7634 +networkx/generators/tests/test_triads.py,sha256=mgpHFf0Z34CqtnXgkdf7gK1dC77ppYAqwviXsaU1HVs,332 +networkx/generators/time_series.py,sha256=Jz33n3mprkLrVbwLRLznQg_lDQvmIL2jWNoY4LCla80,2414 +networkx/generators/trees.py,sha256=xR9H01HkI1WR24KdSEv8inTz6rgbzjwt1vL0GfOImts,39067 +networkx/generators/triads.py,sha256=bXfoxFUH9CJaO7PMxjwiB9SvhPp4ZL3HxlUsL-67Yv4,2233 +networkx/lazy_imports.py,sha256=MDfQ4C99G30uYBmLJBGtGEcrOzHMUcxvOyZpN674DYw,5784 +networkx/linalg/__init__.py,sha256=7iyNZ_YYBnlsW8zSfhUgvEkywOrUWfpIuyS86ZOKlG8,568 +networkx/linalg/__pycache__/__init__.cpython-39.pyc,, +networkx/linalg/__pycache__/algebraicconnectivity.cpython-39.pyc,, +networkx/linalg/__pycache__/attrmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/bethehessianmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/graphmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/laplacianmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/modularitymatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/spectrum.cpython-39.pyc,, +networkx/linalg/algebraicconnectivity.py,sha256=CScaTuN7V1BfEBPA8LSSe1FLV7uE-Am8Iaf9Q9Crgtw,21106 +networkx/linalg/attrmatrix.py,sha256=93xWJq-tIvayQTEUp7UaHLmAQQpSPYMHKF4KqPEfSUE,15504 +networkx/linalg/bethehessianmatrix.py,sha256=sNCKJoRe9jidTrqdX5I8mwl1t08GKzaL14R4ujk42Vk,2692 +networkx/linalg/graphmatrix.py,sha256=TRbhk2cHeJtKW15Gl-lGD7Ih1M3eHkSOC7yoerb7WBU,5513 +networkx/linalg/laplacianmatrix.py,sha256=WQJnmPEVsDTL8kUfQmqg-GJEPeHx3HO4PQLTO-XUp44,13330 +networkx/linalg/modularitymatrix.py,sha256=gtFN_MajMpZlvfsUqzBygLS6l5hZKi2_g1YqzPPedJw,4698 +networkx/linalg/spectrum.py,sha256=oxo9HSRM6jP2g8hKT1lk54Yinhr38AcanSK6RwHnDG0,4194 +networkx/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/linalg/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_bethehessian.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_laplacian.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_modularity.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_spectrum.cpython-39.pyc,, 
+networkx/linalg/tests/test_algebraic_connectivity.py,sha256=Kj2ct6gQ71xXFP7usAbFLJxD7ZdtTzneHiFJQOoVCUQ,13737 +networkx/linalg/tests/test_attrmatrix.py,sha256=XD3YuPc5yXKWbhwVSI8YiV_wABWM-rLtwf1uwwWlnI0,2833 +networkx/linalg/tests/test_bethehessian.py,sha256=0r-Do902ywV10TyqTlIJ2Ls3iMqM6sSs2PZbod7kWBM,1327 +networkx/linalg/tests/test_graphmatrix.py,sha256=e5YSH9ih1VL64nnYgZFDvLyKbP3BFqpp0jY6t-8b2eY,8708 +networkx/linalg/tests/test_laplacian.py,sha256=K8p2upJTJLfNHfAf0B9ohPXBZ4k_2VMpSvIc-jXZ_rM,9934 +networkx/linalg/tests/test_modularity.py,sha256=mfKUvwc3bj6Rud1aG4oK3Eu1qg12o6cB8-pv5ZFicYY,3115 +networkx/linalg/tests/test_spectrum.py,sha256=agP2DsiEIvtkNUkT94mdPtJjwnobnjMTUOwjIQa4giA,2828 +networkx/readwrite/__init__.py,sha256=iHycAh1rjr4bCPQMNiHiqm8cP3iu-g1v_uKiGZtkuXY,562 +networkx/readwrite/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/__pycache__/adjlist.cpython-39.pyc,, +networkx/readwrite/__pycache__/edgelist.cpython-39.pyc,, +networkx/readwrite/__pycache__/gexf.cpython-39.pyc,, +networkx/readwrite/__pycache__/gml.cpython-39.pyc,, +networkx/readwrite/__pycache__/graph6.cpython-39.pyc,, +networkx/readwrite/__pycache__/graphml.cpython-39.pyc,, +networkx/readwrite/__pycache__/leda.cpython-39.pyc,, +networkx/readwrite/__pycache__/multiline_adjlist.cpython-39.pyc,, +networkx/readwrite/__pycache__/p2g.cpython-39.pyc,, +networkx/readwrite/__pycache__/pajek.cpython-39.pyc,, +networkx/readwrite/__pycache__/sparse6.cpython-39.pyc,, +networkx/readwrite/__pycache__/text.cpython-39.pyc,, +networkx/readwrite/adjlist.py,sha256=P8W_dQu-1NQCC8FX-Zpyta_b0L-NuruRE4X-GKSWNSQ,8386 +networkx/readwrite/edgelist.py,sha256=qkS9reBZWrSviBp2ZkUMt5gEo4FbFcBfQXhO14OSI-E,14160 +networkx/readwrite/gexf.py,sha256=UZPVSIlQNH_t0KNTEjclEcJ96FdeLWLVQ84f7u1NoDM,39668 +networkx/readwrite/gml.py,sha256=NK9jDDQhShmf70wrH2P2hNV59KdFUbC866vhvRc_qV8,31104 +networkx/readwrite/graph6.py,sha256=P2jrsgiX75XGMCK4wyIGpXo8snOJ--ZZ6r6N5m1jusE,11355 +networkx/readwrite/graphml.py,sha256=YIxHdP3zXFixYb_rVL051-yyfN9rJwFs_D1tB39NH8w,39183 +networkx/readwrite/json_graph/__init__.py,sha256=31_5zVLXYEZkjOB-TKXZ5bi83JybPWgpCaRKOXIGoOA,676 +networkx/readwrite/json_graph/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/adjacency.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/node_link.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/tree.cpython-39.pyc,, +networkx/readwrite/json_graph/adjacency.py,sha256=QAUoN4LI5ehEjBv__T_hpmHA2n0eHuE3f3OwQv4kiqA,4692 +networkx/readwrite/json_graph/cytoscape.py,sha256=1UqpoAB-96c4sFGKqTjCziInavrHcFJRHFQo4_iits4,5234 +networkx/readwrite/json_graph/node_link.py,sha256=8ujpdgQapwClUautmCOF3Up3JoFiLgZRHu6PUxF1B7Q,7450 +networkx/readwrite/json_graph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/test_adjacency.py,sha256=jueQE3Z_W5BZuCjr0hEsOWSfoQ2fP51p0o0m7IcXUuE,2456 +networkx/readwrite/json_graph/tests/test_cytoscape.py,sha256=vFoDzcSRI9THlmp4Fu2HHhIF9AUmECWs5mftVWjaWWs,2044 
+networkx/readwrite/json_graph/tests/test_node_link.py,sha256=bDe2Vv1M4h0IDbKjS482p8ZE7SZtBfHDgZ1OEPibwoo,4536 +networkx/readwrite/json_graph/tests/test_tree.py,sha256=zBXv3_db2XGxFs3XQ35btNf_ku52aLXXiHZmmX4ixAs,1352 +networkx/readwrite/json_graph/tree.py,sha256=ETjeYnUMyqZ0PbvSR97ar9rJnH8ncLI_Qd16qGFb-jw,3827 +networkx/readwrite/leda.py,sha256=No8DKw26vB2fzaDQNmfOEOuiUS9XGdvBXSKrctJhipg,2749 +networkx/readwrite/multiline_adjlist.py,sha256=Zo7K6gE-FpOBao4KiVaA-yRk4mbQzCJzIl4uUgrWYfM,11255 +networkx/readwrite/p2g.py,sha256=j8vNdr8KKD3o0d1zvfYUJnhe8J9vWxAHLzw3mW85apE,3043 +networkx/readwrite/pajek.py,sha256=aFKB04KvFuCqBawdjvR7U4N8BzPDDatqkf97s8zY_eI,8690 +networkx/readwrite/sparse6.py,sha256=sPL2NBYvB9blDaSJEEdYocHxaU7SmV1OVhYf8a_-LsI,10269 +networkx/readwrite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_adjlist.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_edgelist.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_gexf.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_gml.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_graph6.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_graphml.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_leda.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_p2g.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_pajek.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_sparse6.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_text.cpython-39.pyc,, +networkx/readwrite/tests/test_adjlist.py,sha256=dLEv3txnBrHYxajOYAQhA8CA7axiuPw1ECbaHL5p338,9922 +networkx/readwrite/tests/test_edgelist.py,sha256=atBg6Qjhk8boXs3gUZk4gmg-6GOT5rCosEf30sqOZO4,9969 +networkx/readwrite/tests/test_gexf.py,sha256=Tbqueeh0XRQ8vtmGwXcyy9K3tWPlnLu6Gop0Hy4cZcc,19405 +networkx/readwrite/tests/test_gml.py,sha256=GF8rfOj2M3tMtdQ65DMsXypXSFMeWzKEI1qYV-jd5xA,21334 +networkx/readwrite/tests/test_graph6.py,sha256=IjBpfTr-czBLHb8UT_JzvOTBROpnOf5TKKkfCnEeQT8,6069 +networkx/readwrite/tests/test_graphml.py,sha256=u4u-udRXPCXFUZ9oB0_X4UUx3MVULQjx9tkXUdwebhI,67149 +networkx/readwrite/tests/test_leda.py,sha256=_5F4nLLQ1oAZQMZtTQoFncZL0Oc-IsztFBglEdQeH3k,1392 +networkx/readwrite/tests/test_p2g.py,sha256=drsdod5amV9TGCk-qE2RwsvAop78IKEI1WguVFfd9rs,1320 +networkx/readwrite/tests/test_pajek.py,sha256=XTsnaCaYjroysCHlTsYwMGGrDR0B1MRwWkA-WXbAXTg,4703 +networkx/readwrite/tests/test_sparse6.py,sha256=fLpTG0YgcptNOpUipcCcVlni5i8IyC21kkk3ZeD0XhM,5470 +networkx/readwrite/tests/test_text.py,sha256=w17FdFQ4vK3J8d2UKPZUEtIo5udp6UyilPXyIr8JfpE,56562 +networkx/readwrite/text.py,sha256=4rWNkDgtUSmqGUAIXPkOKU7u9PFQOlp5eBqn6yt9QKk,32132 +networkx/relabel.py,sha256=m8R1KovP9IBtzTE2_6NYgMRxB5AFDwZmUCOFYkDoNXI,10279 +networkx/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/tests/__pycache__/test_all_random_functions.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert_numpy.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert_pandas.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert_scipy.cpython-39.pyc,, +networkx/tests/__pycache__/test_exceptions.cpython-39.pyc,, +networkx/tests/__pycache__/test_import.cpython-39.pyc,, +networkx/tests/__pycache__/test_lazy_imports.cpython-39.pyc,, 
+networkx/tests/__pycache__/test_relabel.cpython-39.pyc,, +networkx/tests/test_all_random_functions.py,sha256=tbFGmaqLrF8lEp0Hn8sOuPzD5rzIpOVOeeBoBk3_W6g,8653 +networkx/tests/test_convert.py,sha256=SoIVrqJFF9Gu9Jff_apfbpqg8QhkfC6QW4qzoSM-ukM,12731 +networkx/tests/test_convert_numpy.py,sha256=R4y5ud0hVZFSGrFjUHD6Anu_aaasy2O_Eke4FaOhPqU,14951 +networkx/tests/test_convert_pandas.py,sha256=cZJEdV0jP8afRZMqJ8-aL9Ma5NdXSWMuj1hVbjGMR2g,12257 +networkx/tests/test_convert_scipy.py,sha256=C2cY_8dgBksO0uttkhyCnjACXtC6KHjxqHUk47P5wH8,10436 +networkx/tests/test_exceptions.py,sha256=XYkpPzqMepSw3MPRUJN5LcFsUsy3YT_fiRDhm0OeAeQ,927 +networkx/tests/test_import.py,sha256=Gm4ujfH9JkQtDrSjOlwXXXUuubI057wskKLCkF6Z92k,220 +networkx/tests/test_lazy_imports.py,sha256=nKykNQPt_ZV8JxCH_EkwwcPNayAgZGQVf89e8I7uIlI,2680 +networkx/tests/test_relabel.py,sha256=dffbjiW_VUAQe7iD8knFS_KepUITt0F6xuwf7daWwKw,14517 +networkx/utils/__init__.py,sha256=T8IdHaWU2MOGbU-1a7JZcAn5YFtO9iDQVt6ky-BRkJg,227 +networkx/utils/__pycache__/__init__.cpython-39.pyc,, +networkx/utils/__pycache__/backends.cpython-39.pyc,, +networkx/utils/__pycache__/decorators.cpython-39.pyc,, +networkx/utils/__pycache__/heaps.cpython-39.pyc,, +networkx/utils/__pycache__/mapped_queue.cpython-39.pyc,, +networkx/utils/__pycache__/misc.cpython-39.pyc,, +networkx/utils/__pycache__/random_sequence.cpython-39.pyc,, +networkx/utils/__pycache__/rcm.cpython-39.pyc,, +networkx/utils/__pycache__/union_find.cpython-39.pyc,, +networkx/utils/backends.py,sha256=5z_pQQrT3kGUCtihbDDqy3TvR9t82KxyMGAaAdsgKLs,40939 +networkx/utils/decorators.py,sha256=Z3U3-pXWD1OKa3cciHG_LSwwylVrpSXdP2rPALq_gZc,46464 +networkx/utils/heaps.py,sha256=HUZuETHfELEqiXdMBPmD9fA2KiACVhp6iEahcrjFxYM,10391 +networkx/utils/mapped_queue.py,sha256=ywJN0Z32EAQ1dezF8ORXP_ca0n16sxQlcIrkQQn5i7I,10185 +networkx/utils/misc.py,sha256=pyN1TGuUFHFfPLubIL-wP-zl_Ybm_U_UWxVw37_tI3g,14351 +networkx/utils/random_sequence.py,sha256=KzKh0BRMri0MBZlzxHNMl3qRTy2DnBexW3eDzmxKab4,4237 +networkx/utils/rcm.py,sha256=MeOhFkv91ALieKJtGHqkhxgO7KJBz53mB8tRcYCX3xk,4623 +networkx/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/utils/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test__init.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_decorators.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_heaps.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_mapped_queue.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_misc.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_random_sequence.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_rcm.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_unionfind.cpython-39.pyc,, +networkx/utils/tests/test__init.py,sha256=QE0i-lNE4pG2eYjB2mZ0uw7jPD-7TdL7Y9p73JoWQmo,363 +networkx/utils/tests/test_decorators.py,sha256=AfxQ_C4BcKG8q9wepyglzIebzD_pPpGRPR4dovl6JR4,13334 +networkx/utils/tests/test_heaps.py,sha256=qCuWMzpcMH1Gwu014CAams78o151QD5YL0mB1fz16Yw,3711 +networkx/utils/tests/test_mapped_queue.py,sha256=l1Nguzz68Fv91FnAT7y7B0GXSoje9uoWiObHo7TliGM,7354 +networkx/utils/tests/test_misc.py,sha256=3oa6D5fnxm9VFODhEwM540hU4IBzEucOoD6DiGvP5gc,8218 +networkx/utils/tests/test_random_sequence.py,sha256=Ou-IeCFybibZuycoin5gUQzzC-iy5yanZFmrqvdGt6Q,925 +networkx/utils/tests/test_rcm.py,sha256=UvUAkgmQMGk_Nn94TJyQsle4A5SLQFqMQWld1tiQ2lk,1421 +networkx/utils/tests/test_unionfind.py,sha256=j-DF5XyeJzq1hoeAgN5Nye2Au7EPD040t8oS4Aw2IwU,1579 
+networkx/utils/union_find.py,sha256=NxKlBlyS71A1Wlnt28L-wyZoI9ExZvJth_0e2XSVris,3338
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/WHEEL b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/entry_points.txt b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2170e9f4285422f4f95b05fa682a9a479c19bf24
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[networkx.backends]
+nx-loopback = networkx.classes.tests.dispatch_interface:dispatcher
diff --git a/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/top_level.txt b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4d07dfe2f85d6849d7f416dcce756b2501ba847e
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx-3.2.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+networkx
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d02fb96f3470c074e876b4e2207f8f1112483eb8
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/__init__.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/conftest.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/conftest.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e211d273abbff9c4ad8f9882df934b994e56044
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/conftest.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/convert.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/convert.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa1c84d1da38ef40ee15fd962749faa79cbc8868
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/convert.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/convert_matrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/convert_matrix.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8379625624f67b1af2d25048d2bb51c38ebd5dbd
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/convert_matrix.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/exception.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/exception.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c57cf92d9aa8031d960234f8fde4365a61ff1bd
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/exception.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/lazy_imports.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/lazy_imports.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbb9b05c3a48ff5b067c80d09a79307b694d97af
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/lazy_imports.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/__pycache__/relabel.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/__pycache__/relabel.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a6a8ff4e8ec66cd1fffc6a75bc56c339644a127
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/__pycache__/relabel.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..145d9f928e8651c3d7be6ae610e5882d80702546
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/__init__.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/atlas.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/atlas.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..971699680d977c41037f8091951cb1437e94bd60
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/atlas.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/classic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/classic.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e620b64c208bd83570a621342c57af59cc6a17c
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/classic.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/cographs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/cographs.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e433766d9354451b8c3f2fc2890245b90de544de
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/cographs.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/community.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/community.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..277c0b62885feb2c2421b1a20a89a5b088edffc3
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/community.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/degree_seq.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/degree_seq.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8d03a7216b7e2bc753575dda2a2982c7d9b3dc0
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/degree_seq.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/directed.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/directed.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c8b5084696be59d01148bdd8e4ffb8d82c513bf
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/directed.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/duplication.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/duplication.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c395d9a2bf06990ba4a76d9c44fdde9a6f19263
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/duplication.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/ego.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/ego.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c55018974ffeb0e08eb00b04bf6521d08387c3b
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/ego.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/expanders.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/expanders.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f737d7c8a5b2c9f31ee52372be284fa25c549b2
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/expanders.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/geometric.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/geometric.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94c7e1a2deba5f2e86cb659372c8d4ed85ca01e5
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/geometric.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/harary_graph.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/harary_graph.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..082fa484a7326ec36c11ec42df5c1660d1ae6f58
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/harary_graph.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/internet_as_graphs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/internet_as_graphs.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa4f7f523234c9e4805da9d2b37b1106e9cfd927
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/internet_as_graphs.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/intersection.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/intersection.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2b6fe69a006b1838564a0479a60981bc9d54b63
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/intersection.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/interval_graph.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/interval_graph.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c4566156040f40d51bf0cb1b43ba4bdc43dc6c4
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/interval_graph.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/joint_degree_seq.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/joint_degree_seq.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3a3cade6e3dc58a6467bc49b644d7226b687cbb
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/joint_degree_seq.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/lattice.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/lattice.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7852aa762f944735f817aa4f05815fa8ff5f0222
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/lattice.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/line.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/line.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2af224bced8a181f006a6c82095bf016eda4a45a
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/line.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/mycielski.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/mycielski.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fa60f1ca9e7785fd8107ac7dca9b522000644e2
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/mycielski.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/nonisomorphic_trees.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/nonisomorphic_trees.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fac091b669c7e231f8de72c792fd344ab8ed2aac
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/nonisomorphic_trees.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/random_clustered.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/random_clustered.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b765f1ed605e6899b647fa8936c5733d735d3e20
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/random_clustered.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/random_graphs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/random_graphs.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..961724014b37e80cd827eef7cb5cf691cc7d0c74
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/random_graphs.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/small.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/small.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0fbc886056aaf5e97acbac2d8b80f9579f38a54f
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/small.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/social.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/social.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3a69eed773b104d162abbd37585ce10319e5d3a
Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/social.cpython-39.pyc differ
diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/spectral_graph_forge.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/spectral_graph_forge.cpython-39.pyc
new file mode 100644
index
0000000000000000000000000000000000000000..d8d5246413187593e6fcd5c6024390e06191ac0b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/spectral_graph_forge.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/stochastic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/stochastic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ed5557b04dcc3a4d97f218316f9f1d4017a7c19 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/stochastic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/sudoku.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/sudoku.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ede68fb0948e121dbd8f3fb6c10b121dfc480ac Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/sudoku.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/time_series.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/time_series.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7cbfb512810a1edf9349350d28aa4b5b7c5e091 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/time_series.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/trees.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/trees.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bfbefa725eb38cab9c217e34ce08c224dc6ec2f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/trees.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/__pycache__/triads.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/__pycache__/triads.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9373e5fbb353172629356a8e4bb9a79c26b48fd4 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/__pycache__/triads.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__init__.py b/phivenv/Lib/site-packages/networkx/generators/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cf5e0fca1f86d988daeb53d6848935da9ff140f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c617cafa0be08fcf2e4468d6c63d89be0e238635 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_atlas.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..2652ad5c215c3ddef2feb14a7650866fd5053548 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_classic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95277d33a82d16109263648fd8e73ceddacc8aea Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_cographs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3759ad3451e0f472c776e5da75f3290619a73131 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_community.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c360675ca71b99b82b5014ee0b33f2716f9cdca3 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_degree_seq.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1f03e305192ccef96242d51df5bdfb73892a645 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_directed.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..051ec459e404c2aabc237bc71f833739e1302edb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_duplication.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a44c212a32e1814aeae8ee8da9a8ba3d77c369b1 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_ego.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..539253c6770de4924e2c2e4c2e32ecaa3be9a2f2 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_expanders.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f797a7193b378211ddccb4e7c2e4e1b9bde65cd5 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_geometric.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9a773baec1ce8652066a0ab4a8e190af97f5728 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_harary_graph.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c66c40171df5d2e2efbdeb7a6fc2b6e9289684f2 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95052ffe5a604082d6affb7819b1ad057940bc02 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_intersection.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f30835c26421095f2782f31a9a7a217953bfe440 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_interval_graph.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dffcd9e836676c7cadd0627a1b89197e2c9a6b6 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21c130e9c8bbd26e2b6835c1d9283ece962296f9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_lattice.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82764430f4bddeb4fcb3e8a8d193064ac5b361f3 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_line.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-39.pyc 
b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96c720b414e38c46038ebd5e486d5fc625dcdc2c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_mycielski.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42d4f39d1e7f199612a1909b9810f2db57291edd Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..524d67624500f2ebf83338bfe844217867f9f30c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_random_clustered.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..483228f8cd00c6593532f161f554e46fcda617c0 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_random_graphs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7003cfff9ac4a0ceec2fd10a7f4bc7d787c02ee9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_small.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b10342b2f3189ecb40b66456596876cf9785ac38 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..380d238618178118bace45408610a633be13da73 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_stochastic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55797b4b29836c48c570806b8e0c06b1a7b8eb8a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_sudoku.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d152b493a218261e1d58c29f01db25b7db90c16f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_time_series.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe33bb41ebeb06dbbd0cfd554d07e1db5ac03468 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_trees.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fa8781e74c7434f80eadc528a73bcf4e0e4de7a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/generators/tests/__pycache__/test_triads.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/test_sudoku.py b/phivenv/Lib/site-packages/networkx/generators/tests/test_sudoku.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3560aa81890d0dc308219d7f0983d3950f9fd5 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/generators/tests/test_sudoku.py @@ -0,0 +1,92 @@ +"""Unit tests for the :mod:`networkx.generators.sudoku_graph` module.""" + +import pytest + +import networkx as nx + + +def test_sudoku_negative(): + """Raise an error when generating a Sudoku graph of order -1.""" + pytest.raises(nx.NetworkXError, nx.sudoku_graph, n=-1) + + +@pytest.mark.parametrize("n", [0, 1, 2, 3, 4]) +def test_sudoku_generator(n): + """Generate Sudoku graphs of various sizes and verify their properties.""" + G = nx.sudoku_graph(n) + expected_nodes = n**4 + expected_degree = (n - 1) * (3 * n + 1) + expected_edges = expected_nodes * expected_degree // 2 + assert not G.is_directed() + assert not G.is_multigraph() + assert G.number_of_nodes() == expected_nodes + assert G.number_of_edges() == expected_edges + assert all(d == expected_degree for _, d in G.degree) + + if n == 2: + assert sorted(G.neighbors(6)) == [2, 3, 4, 5, 7, 10, 14] + elif n == 3: + assert sorted(G.neighbors(42)) == [ + 6, + 15, + 24, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 43, + 44, + 51, + 52, + 53, + 60, + 69, + 78, + ] + elif n == 4: + assert sorted(G.neighbors(0)) == [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 32, + 33, + 34, + 35, + 48, + 49, + 50, + 51, + 64, + 80, + 96, + 112, + 128, + 144, + 160, + 176, + 192, + 208, + 224, + 240, + ] diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/test_time_series.py b/phivenv/Lib/site-packages/networkx/generators/tests/test_time_series.py new file mode 100644 index 0000000000000000000000000000000000000000..9d639d8026f087881689289789f4853ec605cad4 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/generators/tests/test_time_series.py @@ -0,0 +1,63 @@ +"""Unit tests for the :mod:`networkx.generators.time_series` module.""" +import itertools + +import networkx as nx + + +def test_visibility_graph__empty_series__empty_graph(): + 
null_graph = nx.visibility_graph([]) # move along nothing to see here + assert nx.is_empty(null_graph) + + +def test_visibility_graph__single_value_ts__single_node_graph(): + node_graph = nx.visibility_graph([10]) # So Lonely + assert node_graph.number_of_nodes() == 1 + assert node_graph.number_of_edges() == 0 + + +def test_visibility_graph__two_values_ts__single_edge_graph(): + edge_graph = nx.visibility_graph([10, 20]) # Two of Us + assert list(edge_graph.edges) == [(0, 1)] + + +def test_visibility_graph__convex_series__complete_graph(): + series = [i**2 for i in range(10)] # no obstructions + expected_series_length = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_series_length + assert actual_graph.number_of_edges() == 45 + assert nx.is_isomorphic(actual_graph, nx.complete_graph(expected_series_length)) + + +def test_visibility_graph__concave_series__path_graph(): + series = [-(i**2) for i in range(10)] # Slip Slidin' Away + expected_node_count = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_node_count + assert actual_graph.number_of_edges() == expected_node_count - 1 + assert nx.is_isomorphic(actual_graph, nx.path_graph(expected_node_count)) + + +def test_visibility_graph__flat_series__path_graph(): + series = [0] * 10 # living in 1D flatland + expected_node_count = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_node_count + assert actual_graph.number_of_edges() == expected_node_count - 1 + assert nx.is_isomorphic(actual_graph, nx.path_graph(expected_node_count)) + + +def test_visibility_graph_cyclic_series(): + series = list(itertools.islice(itertools.cycle((2, 1, 3)), 17)) # It's so bumpy! + expected_node_count = len(series) + + actual_graph = nx.visibility_graph(series) + + assert actual_graph.number_of_nodes() == expected_node_count + assert actual_graph.number_of_edges() == 25 diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/test_trees.py b/phivenv/Lib/site-packages/networkx/generators/tests/test_trees.py new file mode 100644 index 0000000000000000000000000000000000000000..a43d1e4b58dee69d1971379a4c817946783dd21d --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/generators/tests/test_trees.py @@ -0,0 +1,217 @@ +import random + +import pytest + +import networkx as nx +from networkx.utils import arbitrary_element, graphs_equal + + +@pytest.mark.parametrize("prefix_tree_fn", (nx.prefix_tree, nx.prefix_tree_recursive)) +def test_basic_prefix_tree(prefix_tree_fn): + # This example is from the Wikipedia article "Trie" + # <https://en.wikipedia.org/wiki/Trie>. + strings = ["a", "to", "tea", "ted", "ten", "i", "in", "inn"] + T = prefix_tree_fn(strings) + root, NIL = 0, -1 + + def source_label(v): + return T.nodes[v]["source"] + + # First, we check that the tree has the expected + # structure. Recall that each node that corresponds to one of + # the input strings has an edge to the NIL node. + # + # Consider the three children at level 1 in the trie. + a, i, t = sorted(T[root], key=source_label) + # Check the 'a' branch. + assert len(T[a]) == 1 + nil = arbitrary_element(T[a]) + assert len(T[nil]) == 0 + # Check the 'i' branch.
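+ # Unlike 'a', the letter 'i' is both a complete word and a prefix of "in" + # and "inn", so its node has two children: the NIL terminator and 'n'.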
+ assert len(T[i]) == 2 + nil, in_ = sorted(T[i], key=source_label) + assert len(T[nil]) == 0 + assert len(T[in_]) == 2 + nil, inn = sorted(T[in_], key=source_label) + assert len(T[nil]) == 0 + assert len(T[inn]) == 1 + nil = arbitrary_element(T[inn]) + assert len(T[nil]) == 0 + # Check the 't' branch. + te, to = sorted(T[t], key=source_label) + assert len(T[to]) == 1 + nil = arbitrary_element(T[to]) + assert len(T[nil]) == 0 + tea, ted, ten = sorted(T[te], key=source_label) + assert len(T[tea]) == 1 + assert len(T[ted]) == 1 + assert len(T[ten]) == 1 + nil = arbitrary_element(T[tea]) + assert len(T[nil]) == 0 + nil = arbitrary_element(T[ted]) + assert len(T[nil]) == 0 + nil = arbitrary_element(T[ten]) + assert len(T[nil]) == 0 + + # Next, we check that the "sources" of each of the nodes is the + # rightmost letter in the string corresponding to the path to + # that node. + assert source_label(root) is None + assert source_label(a) == "a" + assert source_label(i) == "i" + assert source_label(t) == "t" + assert source_label(in_) == "n" + assert source_label(inn) == "n" + assert source_label(to) == "o" + assert source_label(te) == "e" + assert source_label(tea) == "a" + assert source_label(ted) == "d" + assert source_label(ten) == "n" + assert source_label(NIL) == "NIL" + + +@pytest.mark.parametrize( + "strings", + ( + ["a", "to", "tea", "ted", "ten", "i", "in", "inn"], + ["ab", "abs", "ad"], + ["ab", "abs", "ad", ""], + ["distant", "disparaging", "distant", "diamond", "ruby"], + ), +) +def test_implementations_consistent(strings): + """Ensure results are consistent between prefix_tree implementations.""" + assert graphs_equal(nx.prefix_tree(strings), nx.prefix_tree_recursive(strings)) + + +@pytest.mark.filterwarnings("ignore") +def test_random_tree(): + """Tests that a random tree is in fact a tree.""" + T = nx.random_tree(10, seed=1234) + assert nx.is_tree(T) + + +@pytest.mark.filterwarnings("ignore") +def test_random_directed_tree(): + """Generates a directed tree.""" + T = nx.random_tree(10, seed=1234, create_using=nx.DiGraph()) + assert T.is_directed() + + +@pytest.mark.filterwarnings("ignore") +def test_random_tree_using_generator(): + """Tests that creating a random tree with a generator works""" + G = nx.Graph() + T = nx.random_tree(10, seed=1234, create_using=G) + assert nx.is_tree(T) + + +def test_random_labeled_rooted_tree(): + for i in range(1, 10): + t1 = nx.random_labeled_rooted_tree(i, seed=42) + t2 = nx.random_labeled_rooted_tree(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + assert nx.is_tree(t1) + assert "root" in t1.graph + assert "roots" not in t1.graph + + +def test_random_labeled_tree_n_zero(): + """Tests if n = 0 then the NetworkXPointlessConcept exception is raised.""" + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_labeled_tree(0, seed=1234) + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_labeled_rooted_tree(0, seed=1234) + + +def test_random_labeled_rooted_forest(): + for i in range(1, 10): + t1 = nx.random_labeled_rooted_forest(i, seed=42) + t2 = nx.random_labeled_rooted_forest(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + for c in nx.connected_components(t1): + assert nx.is_tree(t1.subgraph(c)) + assert "root" not in t1.graph + assert "roots" in t1.graph + + +def test_random_labeled_rooted_forest_n_zero(): + """Tests generation of empty labeled forests.""" + F = nx.random_labeled_rooted_forest(0, seed=1234) + assert len(F) == 0 + assert len(F.graph["roots"]) == 0 + + +def 
test_random_unlabeled_rooted_tree(): + for i in range(1, 10): + t1 = nx.random_unlabeled_rooted_tree(i, seed=42) + t2 = nx.random_unlabeled_rooted_tree(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + assert nx.is_tree(t1) + assert "root" in t1.graph + assert "roots" not in t1.graph + t = nx.random_unlabeled_rooted_tree(15, number_of_trees=10, seed=43) + random.seed(43) + s = nx.random_unlabeled_rooted_tree(15, number_of_trees=10, seed=random) + for i in range(10): + assert nx.utils.misc.graphs_equal(t[i], s[i]) + assert nx.is_tree(t[i]) + assert "root" in t[i].graph + assert "roots" not in t[i].graph + + +def test_random_unlabeled_tree_n_zero(): + """Tests if n = 0 then the NetworkXPointlessConcept exception is raised.""" + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_unlabeled_tree(0, seed=1234) + with pytest.raises(nx.NetworkXPointlessConcept): + T = nx.random_unlabeled_rooted_tree(0, seed=1234) + + +def test_random_unlabeled_rooted_forest(): + with pytest.raises(ValueError): + nx.random_unlabeled_rooted_forest(10, q=0, seed=42) + for i in range(1, 10): + for q in range(1, i + 1): + t1 = nx.random_unlabeled_rooted_forest(i, q=q, seed=42) + t2 = nx.random_unlabeled_rooted_forest(i, q=q, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + for c in nx.connected_components(t1): + assert nx.is_tree(t1.subgraph(c)) + assert len(c) <= q + assert "root" not in t1.graph + assert "roots" in t1.graph + t = nx.random_unlabeled_rooted_forest(15, number_of_forests=10, seed=43) + random.seed(43) + s = nx.random_unlabeled_rooted_forest(15, number_of_forests=10, seed=random) + for i in range(10): + assert nx.utils.misc.graphs_equal(t[i], s[i]) + for c in nx.connected_components(t[i]): + assert nx.is_tree(t[i].subgraph(c)) + assert "root" not in t[i].graph + assert "roots" in t[i].graph + + +def test_random_unlabeled_forest_n_zero(): + """Tests generation of empty unlabeled forests.""" + F = nx.random_unlabeled_rooted_forest(0, seed=1234) + assert len(F) == 0 + assert len(F.graph["roots"]) == 0 + + +def test_random_unlabeled_tree(): + for i in range(1, 10): + t1 = nx.random_unlabeled_tree(i, seed=42) + t2 = nx.random_unlabeled_tree(i, seed=42) + assert nx.utils.misc.graphs_equal(t1, t2) + assert nx.is_tree(t1) + assert "root" not in t1.graph + assert "roots" not in t1.graph + t = nx.random_unlabeled_tree(10, number_of_trees=10, seed=43) + random.seed(43) + s = nx.random_unlabeled_tree(10, number_of_trees=10, seed=random) + for i in range(10): + assert nx.utils.misc.graphs_equal(t[i], s[i]) + assert nx.is_tree(t[i]) + assert "root" not in t[i].graph + assert "roots" not in t[i].graph diff --git a/phivenv/Lib/site-packages/networkx/generators/tests/test_triads.py b/phivenv/Lib/site-packages/networkx/generators/tests/test_triads.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc51ae18f89dc33aaa4c89e8bf9b93edc41f4b5 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/generators/tests/test_triads.py @@ -0,0 +1,14 @@ +"""Unit tests for the :mod:`networkx.generators.triads` module.""" +import pytest + +from networkx import triad_graph + + +def test_triad_graph(): + G = triad_graph("030T") + assert [tuple(e) for e in ("ab", "ac", "cb")] == sorted(G.edges()) + + +def test_invalid_name(): + with pytest.raises(ValueError): + triad_graph("bogus") diff --git a/phivenv/Lib/site-packages/networkx/linalg/__init__.py b/phivenv/Lib/site-packages/networkx/linalg/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..119db185a1ae440fd2cdb6c7f531331642313c34 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/__init__.py @@ -0,0 +1,13 @@ +from networkx.linalg.attrmatrix import * +from networkx.linalg import attrmatrix +from networkx.linalg.spectrum import * +from networkx.linalg import spectrum +from networkx.linalg.graphmatrix import * +from networkx.linalg import graphmatrix +from networkx.linalg.laplacianmatrix import * +from networkx.linalg import laplacianmatrix +from networkx.linalg.algebraicconnectivity import * +from networkx.linalg.modularitymatrix import * +from networkx.linalg import modularitymatrix +from networkx.linalg.bethehessianmatrix import * +from networkx.linalg import bethehessianmatrix diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..720b08063d888766023881c18d4d0407b94c5bcb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c792000f7e1f6554366a1ff348c29697ef0f8cf4 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/algebraicconnectivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e7948bf93f3a08286b262ada17fa820cf43cbd4 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/attrmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..531fa7663ef2f8f2ee146f34e0b5c2be7bf14bee Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/bethehessianmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c82e52844bf342cfa0b010b3079e6e95318b593 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/graphmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f4b9a5ecd765eb8edb7547550f7729db07b344f Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/laplacianmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f3c7b2827d2b2e1e5b1ddc7aff69a0d898fd5cf Binary files /dev/null and 
b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/modularitymatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/__pycache__/spectrum.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/spectrum.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..918e3a1ce982fa5fa326d4e30c34b995d181174b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/__pycache__/spectrum.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py b/phivenv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..b70d204a84dd07f0f7ba5fb7665dbddfd008fa7c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py @@ -0,0 +1,656 @@ +""" +Algebraic connectivity and Fiedler vectors of undirected graphs. +""" +from functools import partial + +import networkx as nx +from networkx.utils import ( + not_implemented_for, + np_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "algebraic_connectivity", + "fiedler_vector", + "spectral_ordering", + "spectral_bisection", +] + + +class _PCGSolver: + """Preconditioned conjugate gradient method. + + To solve Ax = b: + M = A.diagonal() # or some other preconditioner + solver = _PCGSolver(lambda x: A * x, lambda x: M * x) + x = solver.solve(b) + + The inputs A and M are functions which compute + matrix multiplication on the argument. + A - multiply by the matrix A in Ax=b + M - multiply by M, the preconditioner surrogate for A + + Warning: There is no limit on number of iterations. + """ + + def __init__(self, A, M): + self._A = A + self._M = M + + def solve(self, B, tol): + import numpy as np + + # Densifying step - can this be kept sparse? + B = np.asarray(B) + X = np.ndarray(B.shape, order="F") + for j in range(B.shape[1]): + X[:, j] = self._solve(B[:, j], tol) + return X + + def _solve(self, b, tol): + import numpy as np + import scipy as sp + + A = self._A + M = self._M + tol *= sp.linalg.blas.dasum(b) + # Initialize. + x = np.zeros(b.shape) + r = b.copy() + z = M(r) + rz = sp.linalg.blas.ddot(r, z) + p = z.copy() + # Iterate. + while True: + Ap = A(p) + alpha = rz / sp.linalg.blas.ddot(p, Ap) + x = sp.linalg.blas.daxpy(p, x, a=alpha) + r = sp.linalg.blas.daxpy(Ap, r, a=-alpha) + if sp.linalg.blas.dasum(r) < tol: + return x + z = M(r) + beta = sp.linalg.blas.ddot(r, z) + beta, rz = beta / rz, beta + p = sp.linalg.blas.daxpy(p, z, a=beta) + + +class _LUSolver: + """LU factorization. + + To solve Ax = b: + solver = _LUSolver(A) + x = solver.solve(b) + + optional argument `tol` on solve method is ignored but included + to match _PCGsolver API. 
+ """ + + def __init__(self, A): + import scipy as sp + + self._LU = sp.sparse.linalg.splu( + A, + permc_spec="MMD_AT_PLUS_A", + diag_pivot_thresh=0.0, + options={"Equil": True, "SymmetricMode": True}, + ) + + def solve(self, B, tol=None): + import numpy as np + + B = np.asarray(B) + X = np.ndarray(B.shape, order="F") + for j in range(B.shape[1]): + X[:, j] = self._LU.solve(B[:, j]) + return X + + +def _preprocess_graph(G, weight): + """Compute edge weights and eliminate zero-weight edges.""" + if G.is_directed(): + H = nx.MultiGraph() + H.add_nodes_from(G) + H.add_weighted_edges_from( + ((u, v, e.get(weight, 1.0)) for u, v, e in G.edges(data=True) if u != v), + weight=weight, + ) + G = H + if not G.is_multigraph(): + edges = ( + (u, v, abs(e.get(weight, 1.0))) for u, v, e in G.edges(data=True) if u != v + ) + else: + edges = ( + (u, v, sum(abs(e.get(weight, 1.0)) for e in G[u][v].values())) + for u, v in G.edges() + if u != v + ) + H = nx.Graph() + H.add_nodes_from(G) + H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0) + return H + + +def _rcm_estimate(G, nodelist): + """Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.""" + import numpy as np + + G = G.subgraph(nodelist) + order = reverse_cuthill_mckee_ordering(G) + n = len(nodelist) + index = dict(zip(nodelist, range(n))) + x = np.ndarray(n, dtype=float) + for i, u in enumerate(order): + x[index[u]] = i + x -= (n - 1) / 2.0 + return x + + +def _tracemin_fiedler(L, X, normalized, tol, method): + """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm. + + The Fiedler vector of a connected undirected graph is the eigenvector + corresponding to the second smallest eigenvalue of the Laplacian matrix + of the graph. This function starts with the Laplacian L, not the Graph. + + Parameters + ---------- + L : Laplacian of a possibly weighted or normalized, but undirected graph + + X : Initial guess for a solution. Usually a matrix of random numbers. + This function allows more than one column in X to identify more than + one eigenvector if desired. + + normalized : bool + Whether the normalized Laplacian matrix is used. + + tol : float + Tolerance of relative residual in eigenvalue computation. + Warning: There is no limit on number of iterations. + + method : string + Should be 'tracemin_pcg' or 'tracemin_lu'. + Otherwise exception is raised. + + Returns + ------- + sigma, X : Two NumPy arrays of floats. + The lowest eigenvalues and corresponding eigenvectors of L. + The size of input X determines the size of these outputs. + As this is for Fiedler vectors, the zero eigenvalue (and + constant eigenvector) are avoided. + """ + import numpy as np + import scipy as sp + + n = X.shape[0] + + if normalized: + # Form the normalized Laplacian matrix and determine the eigenvector of + # its nullspace. 
+ e = np.sqrt(L.diagonal()) + # TODO: rm csr_array wrapper when spdiags array creation becomes available + D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr")) + L = D @ L @ D + e *= 1.0 / np.linalg.norm(e, 2) + + if normalized: + + def project(X): + """Make X orthogonal to the nullspace of L.""" + X = np.asarray(X) + for j in range(X.shape[1]): + X[:, j] -= (X[:, j] @ e) * e + + else: + + def project(X): + """Make X orthogonal to the nullspace of L.""" + X = np.asarray(X) + for j in range(X.shape[1]): + X[:, j] -= X[:, j].sum() / n + + if method == "tracemin_pcg": + D = L.diagonal().astype(float) + solver = _PCGSolver(lambda x: L @ x, lambda x: D * x) + elif method == "tracemin_lu": + # Convert A to CSC to suppress SparseEfficiencyWarning. + A = sp.sparse.csc_array(L, dtype=float, copy=True) + # Force A to be nonsingular. Since A is the Laplacian matrix of a + # connected graph, its rank deficiency is one, and thus one diagonal + # element needs to be modified. Changing it to infinity forces a zero in the + # corresponding element in the solution. + i = (A.indptr[1:] - A.indptr[:-1]).argmax() + A[i, i] = float("inf") + solver = _LUSolver(A) + else: + raise nx.NetworkXError(f"Unknown linear system solver: {method}") + + # Initialize. + Lnorm = abs(L).sum(axis=1).flatten().max() + project(X) + W = np.ndarray(X.shape, order="F") + + while True: + # Orthonormalize X. + X = np.linalg.qr(X)[0] + # Compute iteration matrix H. + W[:, :] = L @ X + H = X.T @ W + sigma, Y = sp.linalg.eigh(H, overwrite_a=True) + # Compute the Ritz vectors. + X = X @ Y + # Test for convergence exploiting the fact that L * X == W * Y. + res = sp.linalg.blas.dasum(W @ Y[:, 0] - sigma[0] * X[:, 0]) / Lnorm + if res < tol: + break + # Compute X = L \ X / (X' * (L \ X)). + # L \ X can have an arbitrary projection on the nullspace of L, + # which will be eliminated. + W[:, :] = solver.solve(X, tol) + X = (sp.linalg.inv(W.T @ X) @ W.T).T # Preserves Fortran storage order. + project(X) + + return sigma, np.asarray(X) + + +def _get_fiedler_func(method): + """Returns a function that solves the Fiedler eigenvalue problem.""" + import numpy as np + + if method == "tracemin": # old style keyword <v2.1 + method = "tracemin_pcg" + + +@not_implemented_for("directed") +@np_random_state(5) +@nx._dispatch(edge_attrs="weight") +def algebraic_connectivity( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Returns the algebraic connectivity of an undirected graph. + + The algebraic connectivity of a connected undirected graph is the second + smallest eigenvalue of its Laplacian matrix. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + algebraic_connectivity : float + Algebraic connectivity. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + NetworkXError + If G has less than two nodes. + + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored.
+ + See Also + -------- + laplacian_matrix + + Examples + -------- + For undirected graphs algebraic connectivity can tell us if a graph is connected or not + `G` is connected iff ``algebraic_connectivity(G) > 0``: + + >>> G = nx.complete_graph(5) + >>> nx.algebraic_connectivity(G) > 0 + True + >>> G.add_node(10) # G is no longer connected + >>> nx.algebraic_connectivity(G) > 0 + False + + """ + if len(G) < 2: + raise nx.NetworkXError("graph has less than two nodes.") + G = _preprocess_graph(G, weight) + if not nx.is_connected(G): + return 0.0 + + L = nx.laplacian_matrix(G) + if L.shape[0] == 2: + return 2.0 * L[0, 0] if not normalized else 2.0 + + find_fiedler = _get_fiedler_func(method) + x = None if method != "lobpcg" else _rcm_estimate(G, G) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + return sigma + + +@not_implemented_for("directed") +@np_random_state(5) +@nx._dispatch(edge_attrs="weight") +def fiedler_vector( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Returns the Fiedler vector of a connected undirected graph. + + The Fiedler vector of a connected undirected graph is the eigenvector + corresponding to the second smallest eigenvalue of the Laplacian matrix + of the graph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + fiedler_vector : NumPy array of floats. + Fiedler vector. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + NetworkXError + If G has less than two nodes or is not connected. + + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored. + + See Also + -------- + laplacian_matrix + + Examples + -------- + Given a connected graph the signs of the values in the Fiedler vector can be + used to partition the graph into two components. + + >>> G = nx.barbell_graph(5, 0) + >>> nx.fiedler_vector(G, normalized=True, seed=1) + array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899, + 0.26072899, 0.32864129, 0.32864129, 0.32864129, 0.32864129]) + + The connected components are the two 5-node cliques of the barbell graph. 
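+ + Grouping nodes by the sign of their entry in this vector recovers the two + cliques: + + >>> v = nx.fiedler_vector(G, normalized=True, seed=1) + >>> sorted(u for u in G if v[u] < 0) + [0, 1, 2, 3, 4]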
+ """ + import numpy as np + + if len(G) < 2: + raise nx.NetworkXError("graph has less than two nodes.") + G = _preprocess_graph(G, weight) + if not nx.is_connected(G): + raise nx.NetworkXError("graph is not connected.") + + if len(G) == 2: + return np.array([1.0, -1.0]) + + find_fiedler = _get_fiedler_func(method) + L = nx.laplacian_matrix(G) + x = None if method != "lobpcg" else _rcm_estimate(G, G) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + return fiedler + + +@np_random_state(5) +@nx._dispatch(edge_attrs="weight") +def spectral_ordering( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Compute the spectral_ordering of a graph. + + The spectral ordering of a graph is an ordering of its nodes where nodes + in the same weakly connected components appear contiguous and ordered by + their corresponding elements in the Fiedler vector of the component. + + Parameters + ---------- + G : NetworkX graph + A graph. + + weight : object, optional (default: None) + The data key used to determine the weight of each edge. If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + spectral_ordering : NumPy array of floats. + Spectral ordering of nodes. + + Raises + ------ + NetworkXError + If G is empty. + + Notes + ----- + Edge weights are interpreted by their absolute values. For MultiGraph's, + weights of parallel edges are summed. Zero-weighted edges are ignored. + + See Also + -------- + laplacian_matrix + """ + if len(G) == 0: + raise nx.NetworkXError("graph is empty.") + G = _preprocess_graph(G, weight) + + find_fiedler = _get_fiedler_func(method) + order = [] + for component in nx.connected_components(G): + size = len(component) + if size > 2: + L = nx.laplacian_matrix(G, component) + x = None if method != "lobpcg" else _rcm_estimate(G, component) + sigma, fiedler = find_fiedler(L, x, normalized, tol, seed) + sort_info = zip(fiedler, range(size), component) + order.extend(u for x, c, u in sorted(sort_info)) + else: + order.extend(component) + + return order + + +@nx._dispatch(edge_attrs="weight") +def spectral_bisection( + G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None +): + """Bisect the graph using the Fiedler vector. + + This method uses the Fiedler vector to bisect a graph. + The partition is defined by the nodes which are associated with + either positive or negative values in the vector. + + Parameters + ---------- + G : NetworkX Graph + + weight : str, optional (default: weight) + The data key used to determine the weight of each edge. 
If None, then + each edge has unit weight. + + normalized : bool, optional (default: False) + Whether the normalized Laplacian matrix is used. + + tol : float, optional (default: 1e-8) + Tolerance of relative residual in eigenvalue computation. + + method : string, optional (default: 'tracemin_pcg') + Method of eigenvalue computation. It must be one of the tracemin + options shown below (TraceMIN), 'lanczos' (Lanczos iteration) + or 'lobpcg' (LOBPCG). + + The TraceMIN algorithm uses a linear system solver. The following + values allow specifying the solver to be used. + + =============== ======================================== + Value Solver + =============== ======================================== + 'tracemin_pcg' Preconditioned conjugate gradient method + 'tracemin_lu' LU factorization + =============== ======================================== + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + bisection : tuple of sets + Sets with the bisection of nodes + + Examples + -------- + >>> G = nx.barbell_graph(3, 0) + >>> nx.spectral_bisection(G) + ({0, 1, 2}, {3, 4, 5}) + + References + ---------- + .. [1] M. E. J Newman 'Networks: An Introduction', pages 364-370 + Oxford University Press 2011. + """ + import numpy as np + + v = nx.fiedler_vector(G, weight, normalized, tol, method, seed) + nodes = np.array(list(G)) + pos_vals = v >= 0 + + return set(nodes[~pos_vals]), set(nodes[pos_vals]) diff --git a/phivenv/Lib/site-packages/networkx/linalg/attrmatrix.py b/phivenv/Lib/site-packages/networkx/linalg/attrmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..eb6e57c857ca22c93e54d06cfe5a4337185af9a3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/attrmatrix.py @@ -0,0 +1,464 @@ +""" + Functions for constructing matrix-like objects from graph attributes. +""" +import networkx as nx + +__all__ = ["attr_matrix", "attr_sparse_matrix"] + + +def _node_value(G, node_attr): + """Returns a function that returns a value from G.nodes[u]. + + We return a function expecting a node as its sole argument. Then, in the + simplest scenario, the returned function will return G.nodes[u][node_attr]. + However, we also handle the case when `node_attr` is None or when it is a + function itself. + + Parameters + ---------- + G : graph + A NetworkX graph + + node_attr : {None, str, callable} + Specification of how the value of the node attribute should be obtained + from the node attribute dictionary. + + Returns + ------- + value : function + A function expecting a node as its sole argument. The function will + returns a value from G.nodes[u] that depends on `edge_attr`. + + """ + if node_attr is None: + + def value(u): + return u + + elif not callable(node_attr): + # assume it is a key for the node attribute dictionary + def value(u): + return G.nodes[u][node_attr] + + else: + # Advanced: Allow users to specify something else. + # + # For example, + # node_attr = lambda u: G.nodes[u].get('size', .5) * 3 + # + value = node_attr + + return value + + +def _edge_value(G, edge_attr): + """Returns a function that returns a value from G[u][v]. + + Suppose there exists an edge between u and v. Then we return a function + expecting u and v as arguments. For Graph and DiGraph, G[u][v] is + the edge attribute dictionary, and the function (essentially) returns + G[u][v][edge_attr]. However, we also handle cases when `edge_attr` is None + and when it is a function itself. 
For MultiGraph and MultiDiGraph, G[u][v] + is a dictionary of all edges between u and v. In this case, the returned + function sums the value of `edge_attr` for every edge between u and v. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge_attr : {None, str, callable} + Specification of how the value of the edge attribute should be obtained + from the edge attribute dictionary, G[u][v]. For multigraphs, G[u][v] + is a dictionary of all the edges between u and v. This allows for + special treatment of multiedges. + + Returns + ------- + value : function + A function expecting two nodes as parameters. The nodes should + represent the from- and to- node of an edge. The function will + return a value from G[u][v] that depends on `edge_attr`. + + """ + + if edge_attr is None: + # topological count of edges + + if G.is_multigraph(): + + def value(u, v): + return len(G[u][v]) + + else: + + def value(u, v): + return 1 + + elif not callable(edge_attr): + # assume it is a key for the edge attribute dictionary + + if edge_attr == "weight": + # provide a default value + if G.is_multigraph(): + + def value(u, v): + return sum(d.get(edge_attr, 1) for d in G[u][v].values()) + + else: + + def value(u, v): + return G[u][v].get(edge_attr, 1) + + else: + # otherwise, the edge attribute MUST exist for each edge + if G.is_multigraph(): + + def value(u, v): + return sum(d[edge_attr] for d in G[u][v].values()) + + else: + + def value(u, v): + return G[u][v][edge_attr] + + else: + # Advanced: Allow users to specify something else. + # + # Alternative default value: + # edge_attr = lambda u,v: G[u][v].get('thickness', .5) + # + # Function on an attribute: + # edge_attr = lambda u,v: abs(G[u][v]['weight']) + # + # Handle Multi(Di)Graphs differently: + # edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()]) + # + # Ignore multiple edges + # edge_attr = lambda u,v: 1 if len(G[u][v]) else 0 + # + value = edge_attr + + return value + + +@nx._dispatch(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def attr_matrix( + G, + edge_attr=None, + node_attr=None, + normalized=False, + rc_order=None, + dtype=None, + order=None, +): + """Returns the attribute matrix using attributes from `G` as a numpy array. + + If only `G` is passed in, then the adjacency matrix is constructed. + + Let A be a discrete set of values for the node attribute `node_attr`. Then + the elements of A represent the rows and columns of the constructed matrix. + Now, iterate through every edge e=(u,v) in `G` and consider the value + of the edge attribute `edge_attr`. If ua and va are the values of the + node attribute `node_attr` for u and v, respectively, then the value of + the edge attribute is added to the matrix element at (ua, va). + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the attribute matrix. + + edge_attr : str, optional + Each element of the matrix represents a running total of the + specified edge attribute for edges whose node attributes correspond + to the rows/cols of the matrix. The attribute must be present for + all edges in the graph. If no attribute is specified, then we + just count the number of edges whose node attributes correspond + to the matrix element. + + node_attr : str, optional + Each row and column in the matrix represents a particular value + of the node attribute. The attribute must be present for all nodes + in the graph. Note, the values of this attribute should be reliably + hashable. So, float values are not recommended. 
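The parallel-edge summation described above can be checked end to end through the public ``attr_matrix`` (illustrative doctest; assigning the returned edge keys merely silences their doctest output):

    >>> G = nx.MultiGraph()
    >>> k1 = G.add_edge(0, 1, weight=2)
    >>> k2 = G.add_edge(0, 1)            # parallel edge, default weight 1
    >>> nx.attr_matrix(G, edge_attr="weight", rc_order=[0, 1])
    array([[0., 3.],
           [3., 0.]])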
If no attribute is + specified, then the rows and columns will be the nodes of the graph. + + normalized : bool, optional + If True, then each row is normalized by the summation of its values. + + rc_order : list, optional + A list of the node attribute values. This list specifies the ordering + of rows and columns of the array. If no ordering is provided, then + the ordering will be random (and also, a return value). + + Other Parameters + ---------------- + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. Keep in mind certain + dtypes can yield unexpected results if the array is to be normalized. + The parameter is passed to numpy.zeros(). If unspecified, the NumPy + default is used. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. This parameter is passed to + numpy.zeros(). If unspecified, the NumPy default is used. + + Returns + ------- + M : 2D NumPy ndarray + The attribute matrix. + + ordering : list + If `rc_order` was specified, then only the attribute matrix is returned. + However, if `rc_order` was None, then the ordering used to construct + the matrix is returned as well. + + Examples + -------- + Construct an adjacency matrix: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, thickness=1, weight=3) + >>> G.add_edge(0, 2, thickness=2) + >>> G.add_edge(1, 2, thickness=3) + >>> nx.attr_matrix(G, rc_order=[0, 1, 2]) + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]]) + + Alternatively, we can obtain the matrix describing edge thickness. + + >>> nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + array([[0., 1., 2.], + [1., 0., 3.], + [2., 3., 0.]]) + + We can also color the nodes and ask for the probability distribution over + all edges (u,v) describing: + + Pr(v has color Y | u has color X) + + >>> G.nodes[0]["color"] = "red" + >>> G.nodes[1]["color"] = "red" + >>> G.nodes[2]["color"] = "blue" + >>> rc = ["red", "blue"] + >>> nx.attr_matrix(G, node_attr="color", normalized=True, rc_order=rc) + array([[0.33333333, 0.66666667], + [1. , 0. ]]) + + For example, the above tells us that for all edges (u,v): + + Pr( v is red | u is red) = 1/3 + Pr( v is blue | u is red) = 2/3 + + Pr( v is red | u is blue) = 1 + Pr( v is blue | u is blue) = 0 + + Finally, we can obtain the total weights listed by the node colors. + + >>> nx.attr_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc) + array([[3., 2.], + [2., 0.]]) + + Thus, the total weight over all edges (u,v) with u and v having colors: + + (red, red) is 3 # the sole contribution is from edge (0,1) + (red, blue) is 2 # contributions from edges (0,2) and (1,2) + (blue, red) is 2 # same as (red, blue) since graph is undirected + (blue, blue) is 0 # there are no edges with blue endpoints + + """ + import numpy as np + + edge_value = _edge_value(G, edge_attr) + node_value = _node_value(G, node_attr) + + if rc_order is None: + ordering = list({node_value(n) for n in G}) + else: + ordering = rc_order + + N = len(ordering) + undirected = not G.is_directed() + index = dict(zip(ordering, range(N))) + M = np.zeros((N, N), dtype=dtype, order=order) + + seen = set() + for u, nbrdict in G.adjacency(): + for v in nbrdict: + # Obtain the node attribute values. 
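# Annotation (not in the original file): G.adjacency() visits each
# undirected edge twice, once from each endpoint. The `seen` check below
# adds edge_value only on the first visit, and M[j, i] mirrors M[i, j]
# so the resulting matrix stays symmetric.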
+ i, j = index[node_value(u)], index[node_value(v)] + if v not in seen: + M[i, j] += edge_value(u, v) + if undirected: + M[j, i] = M[i, j] + + if undirected: + seen.add(u) + + if normalized: + M /= M.sum(axis=1).reshape((N, 1)) + + if rc_order is None: + return M, ordering + else: + return M + + +@nx._dispatch(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def attr_sparse_matrix( + G, edge_attr=None, node_attr=None, normalized=False, rc_order=None, dtype=None +): + """Returns a SciPy sparse array using attributes from G. + + If only `G` is passed in, then the adjacency matrix is constructed. + + Let A be a discrete set of values for the node attribute `node_attr`. Then + the elements of A represent the rows and columns of the constructed matrix. + Now, iterate through every edge e=(u,v) in `G` and consider the value + of the edge attribute `edge_attr`. If ua and va are the values of the + node attribute `node_attr` for u and v, respectively, then the value of + the edge attribute is added to the matrix element at (ua, va). + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy matrix. + + edge_attr : str, optional + Each element of the matrix represents a running total of the + specified edge attribute for edges whose node attributes correspond + to the rows/cols of the matrix. The attribute must be present for + all edges in the graph. If no attribute is specified, then we + just count the number of edges whose node attributes correspond + to the matrix element. + + node_attr : str, optional + Each row and column in the matrix represents a particular value + of the node attribute. The attribute must be present for all nodes + in the graph. Note, the values of this attribute should be reliably + hashable. So, float values are not recommended. If no attribute is + specified, then the rows and columns will be the nodes of the graph. + + normalized : bool, optional + If True, then each row is normalized by the summation of its values. + + rc_order : list, optional + A list of the node attribute values. This list specifies the ordering + of rows and columns of the array. If no ordering is provided, then + the ordering will be random (and also, a return value). + + Other Parameters + ---------------- + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. Keep in mind certain + dtypes can yield unexpected results if the array is to be normalized. + The parameter is passed to numpy.zeros(). If unspecified, the NumPy + default is used. + + Returns + ------- + M : SciPy sparse array + The attribute matrix. + + ordering : list + If `rc_order` was specified, then only the matrix is returned. + However, if `rc_order` was None, then the ordering used to construct + the matrix is returned as well. + + Examples + -------- + Construct an adjacency matrix: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, thickness=1, weight=3) + >>> G.add_edge(0, 2, thickness=2) + >>> G.add_edge(1, 2, thickness=3) + >>> M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2]) + >>> M.toarray() + array([[0., 1., 1.], + [1., 0., 1.], + [1., 1., 0.]]) + + Alternatively, we can obtain the matrix describing edge thickness. 
+ + >>> M = nx.attr_sparse_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + >>> M.toarray() + array([[0., 1., 2.], + [1., 0., 3.], + [2., 3., 0.]]) + + We can also color the nodes and ask for the probability distribution over + all edges (u,v) describing: + + Pr(v has color Y | u has color X) + + >>> G.nodes[0]["color"] = "red" + >>> G.nodes[1]["color"] = "red" + >>> G.nodes[2]["color"] = "blue" + >>> rc = ["red", "blue"] + >>> M = nx.attr_sparse_matrix(G, node_attr="color", normalized=True, rc_order=rc) + >>> M.toarray() + array([[0.33333333, 0.66666667], + [1. , 0. ]]) + + For example, the above tells us that for all edges (u,v): + + Pr( v is red | u is red) = 1/3 + Pr( v is blue | u is red) = 2/3 + + Pr( v is red | u is blue) = 1 + Pr( v is blue | u is blue) = 0 + + Finally, we can obtain the total weights listed by the node colors. + + >>> M = nx.attr_sparse_matrix(G, edge_attr="weight", node_attr="color", rc_order=rc) + >>> M.toarray() + array([[3., 2.], + [2., 0.]]) + + Thus, the total weight over all edges (u,v) with u and v having colors: + + (red, red) is 3 # the sole contribution is from edge (0,1) + (red, blue) is 2 # contributions from edges (0,2) and (1,2) + (blue, red) is 2 # same as (red, blue) since graph is undirected + (blue, blue) is 0 # there are no edges with blue endpoints + + """ + import numpy as np + import scipy as sp + + edge_value = _edge_value(G, edge_attr) + node_value = _node_value(G, node_attr) + + if rc_order is None: + ordering = list({node_value(n) for n in G}) + else: + ordering = rc_order + + N = len(ordering) + undirected = not G.is_directed() + index = dict(zip(ordering, range(N))) + M = sp.sparse.lil_array((N, N), dtype=dtype) + + seen = set() + for u, nbrdict in G.adjacency(): + for v in nbrdict: + # Obtain the node attribute values. + i, j = index[node_value(u)], index[node_value(v)] + if v not in seen: + M[i, j] += edge_value(u, v) + if undirected: + M[j, i] = M[i, j] + + if undirected: + seen.add(u) + + if normalized: + M *= 1 / M.sum(axis=1)[:, np.newaxis] # in-place mult preserves sparse + + if rc_order is None: + return M, ordering + else: + return M diff --git a/phivenv/Lib/site-packages/networkx/linalg/bethehessianmatrix.py b/phivenv/Lib/site-packages/networkx/linalg/bethehessianmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..542fd1c6712fbca1f2823325264dc1cd158fe6cc --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/bethehessianmatrix.py @@ -0,0 +1,78 @@ +"""Bethe Hessian or deformed Laplacian matrix of graphs.""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["bethe_hessian_matrix"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def bethe_hessian_matrix(G, r=None, nodelist=None): + r"""Returns the Bethe Hessian matrix of G. + + The Bethe Hessian is a family of matrices parametrized by r, defined as + H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the + diagonal matrix of node degrees, and I is the identify matrix. It is equal + to the graph laplacian when the regularizer r = 1. + + The default choice of regularizer should be the ratio [2]_ + + .. math:: + r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1 + + Parameters + ---------- + G : Graph + A NetworkX graph + r : float + Regularizer parameter + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by ``G.nodes()``. 
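Making the default-regularizer formula above concrete (an illustrative check; the same degree sequence, and the resulting r = 1.25, are exercised by the test file later in this diff):

    >>> G = nx.havel_hakimi_graph([3, 2, 2, 1, 0])
    >>> k = [d for _, d in G.degree()]
    >>> sum(d * d for d in k) / sum(k) - 1   # r_m = (sum k_i)^-1 (sum k_i^2) - 1
    1.25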
+ + Returns + ------- + H : scipy.sparse.csr_array + The Bethe Hessian matrix of `G`, with parameter `r`. + + Examples + -------- + >>> k = [3, 2, 2, 1, 0] + >>> G = nx.havel_hakimi_graph(k) + >>> H = nx.bethe_hessian_matrix(G) + >>> H.toarray() + array([[ 3.5625, -1.25 , -1.25 , -1.25 , 0. ], + [-1.25 , 2.5625, -1.25 , 0. , 0. ], + [-1.25 , -1.25 , 2.5625, 0. , 0. ], + [-1.25 , 0. , 0. , 1.5625, 0. ], + [ 0. , 0. , 0. , 0. , 0.5625]]) + + See Also + -------- + bethe_hessian_spectrum + adjacency_matrix + laplacian_matrix + + References + ---------- + .. [1] A. Saade, F. Krzakala and L. Zdeborová + "Spectral Clustering of Graphs with the Bethe Hessian", + Advances in Neural Information Processing Systems, 2014. + .. [2] C. M. Le, E. Levina + "Estimating the number of communities in networks by spectral methods" + arXiv:1507.00827, 2015. + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + if r is None: + r = sum(d**2 for v, d in nx.degree(G)) / sum(d for v, d in nx.degree(G)) - 1 + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, format="csr") + n, m = A.shape + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) + # TODO: Rm csr_array wrapper when eye array creation becomes available + I = sp.sparse.csr_array(sp.sparse.eye(m, n, format="csr")) + return (r**2 - 1) * I - r * A + D diff --git a/phivenv/Lib/site-packages/networkx/linalg/graphmatrix.py b/phivenv/Lib/site-packages/networkx/linalg/graphmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..9dece36b48c8d88d6a4bf12d433387db90d8cf7e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/graphmatrix.py @@ -0,0 +1,166 @@ +""" +Adjacency matrix and incidence matrix of graphs. +""" +import networkx as nx + +__all__ = ["incidence_matrix", "adjacency_matrix"] + + +@nx._dispatch(edge_attrs="weight") +def incidence_matrix( + G, nodelist=None, edgelist=None, oriented=False, weight=None, *, dtype=None +): + """Returns incidence matrix of G. + + The incidence matrix assigns each row to a node and each column to an edge. + For a standard incidence matrix a 1 appears wherever a row's node is + incident on the column's edge. For an oriented incidence matrix each + edge is assigned an orientation (arbitrarily for undirected and aligning to + direction for directed). A -1 appears for the source (tail) of an edge and + 1 for the destination (head) of the edge. The elements are zero otherwise. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional (default= all nodes in G) + The rows are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + edgelist : list, optional (default= all edges in G) + The columns are ordered according to the edges in edgelist. + If edgelist is None, then the ordering is produced by G.edges(). + + oriented: bool, optional (default=False) + If True, matrix elements are +1 or -1 for the head or tail node + respectively of each edge. If False, +1 occurs at both nodes. + + weight : string or None, optional (default=None) + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. Edge weights, if used, + should be positive so that the orientation can provide the sign. + + dtype : a NumPy dtype or None (default=None) + The dtype of the output sparse array. This type should be a compatible + type of the weight argument, eg. 
if weight would return a float this + argument should also be a float. + If None, then the default for SciPy is used. + + Returns + ------- + A : SciPy sparse array + The incidence matrix of G. + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges in edgelist should be + (u,v,key) 3-tuples. + + "Networks are the best discrete model for so many problems in + applied mathematics" [1]_. + + References + ---------- + .. [1] Gil Strang, Network applications: A = incidence matrix, + http://videolectures.net/mit18085f07_strang_lec03/ + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + if edgelist is None: + if G.is_multigraph(): + edgelist = list(G.edges(keys=True)) + else: + edgelist = list(G.edges()) + A = sp.sparse.lil_array((len(nodelist), len(edgelist)), dtype=dtype) + node_index = {node: i for i, node in enumerate(nodelist)} + for ei, e in enumerate(edgelist): + (u, v) = e[:2] + if u == v: + continue # self loops give zero column + try: + ui = node_index[u] + vi = node_index[v] + except KeyError as err: + raise nx.NetworkXError( + f"node {u} or {v} in edgelist but not in nodelist" + ) from err + if weight is None: + wt = 1 + else: + if G.is_multigraph(): + ekey = e[2] + wt = G[u][v][ekey].get(weight, 1) + else: + wt = G[u][v].get(weight, 1) + if oriented: + A[ui, ei] = -wt + A[vi, ei] = wt + else: + A[ui, ei] = wt + A[vi, ei] = wt + return A.asformat("csc") + + +@nx._dispatch(edge_attrs="weight") +def adjacency_matrix(G, nodelist=None, dtype=None, weight="weight"): + """Returns adjacency matrix of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + The desired data-type for the array. + If None, then the NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + A : SciPy sparse array + Adjacency matrix representation of G. + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + If you want a pure Python adjacency matrix representation try + networkx.convert.to_dict_of_dicts which will return a + dictionary-of-dictionaries format that can be addressed as a + sparse matrix. + + For MultiGraph/MultiDiGraph with parallel edges the weights are summed. + See `to_numpy_array` for other options. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the edge weight attribute + (or the number 1 if the edge has no weight attribute). 
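A small oriented-incidence sketch for the function above (illustrative; -1 marks each edge's tail and +1 its head, with columns following ``G.edges()``):

    >>> G = nx.path_graph(3)             # edges (0, 1) and (1, 2)
    >>> nx.incidence_matrix(G, oriented=True, dtype=int).toarray()
    array([[-1,  0],
           [ 1, -1],
           [ 0,  1]])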
If the + alternate convention of doubling the edge weight is desired the + resulting SciPy sparse array can be modified as follows: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.adjacency_matrix(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal() * 2) + >>> print(A.todense()) + [[2]] + + See Also + -------- + to_numpy_array + to_scipy_sparse_array + to_dict_of_dicts + adjacency_spectrum + """ + return nx.to_scipy_sparse_array(G, nodelist=nodelist, dtype=dtype, weight=weight) diff --git a/phivenv/Lib/site-packages/networkx/linalg/laplacianmatrix.py b/phivenv/Lib/site-packages/networkx/linalg/laplacianmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..13763828131825824a73b121d36d2be8892fb63a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/laplacianmatrix.py @@ -0,0 +1,428 @@ +"""Laplacian matrix of graphs. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "laplacian_matrix", + "normalized_laplacian_matrix", + "total_spanning_tree_weight", + "directed_laplacian_matrix", + "directed_combinatorial_laplacian_matrix", +] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def laplacian_matrix(G, nodelist=None, weight="weight"): + """Returns the Laplacian matrix of G. + + The graph Laplacian is the matrix L = D - A, where + A is the adjacency matrix and D is the diagonal matrix of node degrees. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + L : SciPy sparse array + The Laplacian matrix of G. + + Notes + ----- + For MultiGraph, the edges weights are summed. + + See Also + -------- + :func:`~networkx.convert_matrix.to_numpy_array` + normalized_laplacian_matrix + :func:`~networkx.linalg.spectrum.laplacian_spectrum` + + Examples + -------- + For graphs with multiple connected components, L is permutation-similar + to a block diagonal matrix where each block is the respective Laplacian + matrix for each component. + + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> print(nx.laplacian_matrix(G).toarray()) + [[ 1 -1 0 0 0] + [-1 2 -1 0 0] + [ 0 -1 1 0 0] + [ 0 0 0 1 -1] + [ 0 0 0 -1 1]] + + """ + import scipy as sp + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + n, m = A.shape + # TODO: rm csr_array wrapper when spdiags can produce arrays + D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) + return D - A + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): + r"""Returns the normalized Laplacian matrix of G. + + The normalized graph Laplacian is the matrix + + .. math:: + + N = D^{-1/2} L D^{-1/2} + + where `L` is the graph Laplacian and `D` is the diagonal matrix of + node degrees [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. 
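A hedged sanity check of the N = D^{-1/2} L D^{-1/2} definition above (illustrative; assumes no isolated nodes, so the inverse square roots exist, and uses the fact that for a graph without self-loops the degrees sit on L's diagonal):

    >>> import numpy as np
    >>> G = nx.path_graph(3)
    >>> L = nx.laplacian_matrix(G).toarray()
    >>> DH = np.diag(1 / np.sqrt(L.diagonal()))
    >>> np.allclose(DH @ L @ DH, nx.normalized_laplacian_matrix(G).toarray())
    True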
+ If None, then each edge has weight 1. + + Returns + ------- + N : SciPy sparse array + The normalized Laplacian matrix of G. + + Notes + ----- + For MultiGraph, the edges weights are summed. + See :func:`to_numpy_array` for other options. + + If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is + the adjacency matrix [2]_. + + See Also + -------- + laplacian_matrix + normalized_laplacian_spectrum + + References + ---------- + .. [1] Fan Chung-Graham, Spectral Graph Theory, + CBMS Regional Conference Series in Mathematics, Number 92, 1997. + .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized + Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, + March 2007. + """ + import numpy as np + import scipy as sp + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + n, m = A.shape + diags = A.sum(axis=1) + # TODO: rm csr_array wrapper when spdiags can produce arrays + D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format="csr")) + L = D - A + with np.errstate(divide="ignore"): + diags_sqrt = 1.0 / np.sqrt(diags) + diags_sqrt[np.isinf(diags_sqrt)] = 0 + # TODO: rm csr_array wrapper when spdiags can produce arrays + DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) + return DH @ (L @ DH) + + +@nx._dispatch(edge_attrs="weight") +def total_spanning_tree_weight(G, weight=None): + """ + Returns the total weight of all spanning trees of `G`. + + Kirchoff's Tree Matrix Theorem states that the determinant of any cofactor of the + Laplacian matrix of a graph is the number of spanning trees in the graph. For a + weighted Laplacian matrix, it is the sum across all spanning trees of the + multiplicative weight of each tree. That is, the weight of each tree is the + product of its edge weights. + + Parameters + ---------- + G : NetworkX Graph + The graph to use Kirchhoff's theorem on. + + weight : string or None + The key for the edge attribute holding the edge weight. If `None`, then + each edge is assumed to have a weight of 1 and this function returns the + total number of spanning trees in `G`. + + Returns + ------- + float + The sum of the total multiplicative weights for all spanning trees in `G` + """ + import numpy as np + + G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray() + # Determinant ignoring first row and column + return abs(np.linalg.det(G_laplacian[1:, 1:])) + + +############################################################################### +# Code based on work from https://github.com/bjedwards + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def directed_laplacian_matrix( + G, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Returns the directed Laplacian matrix of G. + + The graph directed Laplacian is the matrix + + .. math:: + + L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2 + + where `I` is the identity matrix, `P` is the transition matrix of the + graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and + zeros elsewhere [1]_. + + Depending on the value of walk_type, `P` can be the transition matrix + induced by a random walk, a lazy random walk, or a random walk with + teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. 
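``total_spanning_tree_weight`` above can be spot-checked against Cayley's formula, which gives n^(n-2) spanning trees for the complete graph K_n; the rounding below only absorbs floating-point noise from the determinant (illustrative):

    >>> round(nx.total_spanning_tree_weight(nx.complete_graph(4)))   # 4 ** 2
    16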
+ If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + If None, `P` is selected depending on the properties of the + graph. Otherwise is one of 'random', 'lazy', or 'pagerank' + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + L : NumPy matrix + Normalized Laplacian of G. + + Notes + ----- + Only implemented for DiGraphs + + See Also + -------- + laplacian_matrix + + References + ---------- + .. [1] Fan Chung (2005). + Laplacians and the Cheeger inequality for directed graphs. + Annals of Combinatorics, 9(1), 2005 + """ + import numpy as np + import scipy as sp + + # NOTE: P has type ndarray if walk_type=="pagerank", else csr_array + P = _transition_matrix( + G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha + ) + + n, m = P.shape + + evals, evecs = sp.sparse.linalg.eigs(P.T, k=1) + v = evecs.flatten().real + p = v / v.sum() + # p>=0 by Perron-Frobenius Thm. Use abs() to fix roundoff across zero gh-6865 + sqrtp = np.sqrt(np.abs(p)) + Q = ( + # TODO: rm csr_array wrapper when spdiags creates arrays + sp.sparse.csr_array(sp.sparse.spdiags(sqrtp, 0, n, n)) + @ P + # TODO: rm csr_array wrapper when spdiags creates arrays + @ sp.sparse.csr_array(sp.sparse.spdiags(1.0 / sqrtp, 0, n, n)) + ) + # NOTE: This could be sparsified for the non-pagerank cases + I = np.identity(len(G)) + + return I - (Q + Q.T) / 2.0 + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def directed_combinatorial_laplacian_matrix( + G, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Return the directed combinatorial Laplacian matrix of G. + + The graph directed combinatorial Laplacian is the matrix + + .. math:: + + L = \Phi - (\Phi P + P^T \Phi) / 2 + + where `P` is the transition matrix of the graph and `\Phi` a matrix + with the Perron vector of `P` in the diagonal and zeros elsewhere [1]_. + + Depending on the value of walk_type, `P` can be the transition matrix + induced by a random walk, a lazy random walk, or a random walk with + teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + If None, `P` is selected depending on the properties of the + graph. Otherwise is one of 'random', 'lazy', or 'pagerank' + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + L : NumPy matrix + Combinatorial Laplacian of G. + + Notes + ----- + Only implemented for DiGraphs + + See Also + -------- + laplacian_matrix + + References + ---------- + .. [1] Fan Chung (2005). + Laplacians and the Cheeger inequality for directed graphs. 
+ Annals of Combinatorics, 9(1), 2005 + """ + import scipy as sp + + P = _transition_matrix( + G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha + ) + + n, m = P.shape + + evals, evecs = sp.sparse.linalg.eigs(P.T, k=1) + v = evecs.flatten().real + p = v / v.sum() + # NOTE: could be improved by not densifying + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + Phi = sp.sparse.csr_array(sp.sparse.spdiags(p, 0, n, n)).toarray() + + return Phi - (Phi @ P + P.T @ Phi) / 2.0 + + +def _transition_matrix(G, nodelist=None, weight="weight", walk_type=None, alpha=0.95): + """Returns the transition matrix of G. + + This is a row stochastic giving the transition probabilities while + performing a random walk on the graph. Depending on the value of walk_type, + P can be the transition matrix induced by a random walk, a lazy random walk, + or a random walk with teleportation (PageRank). + + Parameters + ---------- + G : DiGraph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + If None, `P` is selected depending on the properties of the + graph. Otherwise is one of 'random', 'lazy', or 'pagerank' + + alpha : real + (1 - alpha) is the teleportation probability used with pagerank + + Returns + ------- + P : numpy.ndarray + transition matrix of G. + + Raises + ------ + NetworkXError + If walk_type not specified or alpha not in valid range + """ + import numpy as np + import scipy as sp + + if walk_type is None: + if nx.is_strongly_connected(G): + if nx.is_aperiodic(G): + walk_type = "random" + else: + walk_type = "lazy" + else: + walk_type = "pagerank" + + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float) + n, m = A.shape + if walk_type in ["random", "lazy"]: + # TODO: Rm csr_array wrapper when spdiags array creation becomes available + DI = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / A.sum(axis=1), 0, n, n)) + if walk_type == "random": + P = DI @ A + else: + # TODO: Rm csr_array wrapper when identity array creation becomes available + I = sp.sparse.csr_array(sp.sparse.identity(n)) + P = (I + DI @ A) / 2.0 + + elif walk_type == "pagerank": + if not (0 < alpha < 1): + raise nx.NetworkXError("alpha must be between 0 and 1") + # this is using a dense representation. NOTE: This should be sparsified! + A = A.toarray() + # add constant to dangling nodes' row + A[A.sum(axis=1) == 0, :] = 1 / n + # normalize + A = A / A.sum(axis=1)[np.newaxis, :].T + P = alpha * A + (1 - alpha) / n + else: + raise nx.NetworkXError("walk_type must be random, lazy, or pagerank") + + return P diff --git a/phivenv/Lib/site-packages/networkx/linalg/modularitymatrix.py b/phivenv/Lib/site-packages/networkx/linalg/modularitymatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..03671a1fa10916d91571b9651d621be9d67420e7 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/modularitymatrix.py @@ -0,0 +1,166 @@ +"""Modularity matrix of graphs. 
+""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["modularity_matrix", "directed_modularity_matrix"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def modularity_matrix(G, nodelist=None, weight=None): + r"""Returns the modularity matrix of G. + + The modularity matrix is the matrix B = A - , where A is the adjacency + matrix and is the average adjacency matrix, assuming that the graph + is described by the configuration model. + + More specifically, the element B_ij of B is defined as + + .. math:: + A_{ij} - {k_i k_j \over 2 m} + + where k_i is the degree of node i, and where m is the number of edges + in the graph. When weight is set to a name of an attribute edge, Aij, k_i, + k_j and m are computed using its value. + + Parameters + ---------- + G : Graph + A NetworkX graph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + Returns + ------- + B : Numpy array + The modularity matrix of G. + + Examples + -------- + >>> k = [3, 2, 2, 1, 0] + >>> G = nx.havel_hakimi_graph(k) + >>> B = nx.modularity_matrix(G) + + + See Also + -------- + to_numpy_array + modularity_spectrum + adjacency_matrix + directed_modularity_matrix + + References + ---------- + .. [1] M. E. J. Newman, "Modularity and community structure in networks", + Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006. + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + k = A.sum(axis=1) + m = k.sum() * 0.5 + # Expected adjacency matrix + X = np.outer(k, k) / (2 * m) + + return A - X + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def directed_modularity_matrix(G, nodelist=None, weight=None): + """Returns the directed modularity matrix of G. + + The modularity matrix is the matrix B = A - , where A is the adjacency + matrix and is the expected adjacency matrix, assuming that the graph + is described by the configuration model. + + More specifically, the element B_ij of B is defined as + + .. math:: + B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m + + where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree + of node j, with m the number of edges in the graph. When weight is set + to a name of an attribute edge, Aij, k_i, k_j and m are computed using + its value. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph + + nodelist : list, optional + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + Returns + ------- + B : Numpy array + The modularity matrix of G. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from( + ... ( + ... (1, 2), + ... (1, 3), + ... (3, 1), + ... (3, 2), + ... (3, 5), + ... (4, 5), + ... (4, 6), + ... (5, 4), + ... (5, 6), + ... (6, 4), + ... ) + ... 
) + >>> B = nx.directed_modularity_matrix(G) + + + Notes + ----- + NetworkX defines the element A_ij of the adjacency matrix as 1 if there + is a link going from node i to node j. Leicht and Newman use the opposite + definition. This explains the different expression for B_ij. + + See Also + -------- + to_numpy_array + modularity_spectrum + adjacency_matrix + modularity_matrix + + References + ---------- + .. [1] E. A. Leicht, M. E. J. Newman, + "Community structure in directed networks", + Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008. + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") + k_in = A.sum(axis=0) + k_out = A.sum(axis=1) + m = k_in.sum() + # Expected adjacency matrix + X = np.outer(k_out, k_in) / m + + return A - X diff --git a/phivenv/Lib/site-packages/networkx/linalg/spectrum.py b/phivenv/Lib/site-packages/networkx/linalg/spectrum.py new file mode 100644 index 0000000000000000000000000000000000000000..979eeabd814caec0c97da942f00c86795f335663 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/spectrum.py @@ -0,0 +1,185 @@ +""" +Eigenvalue spectrum of graphs. +""" +import networkx as nx + +__all__ = [ + "laplacian_spectrum", + "adjacency_spectrum", + "modularity_spectrum", + "normalized_laplacian_spectrum", + "bethe_hessian_spectrum", +] + + +@nx._dispatch(edge_attrs="weight") +def laplacian_spectrum(G, weight="weight"): + """Returns eigenvalues of the Laplacian of G + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See :func:`~networkx.convert_matrix.to_numpy_array` for other options. + + See Also + -------- + laplacian_matrix + + Examples + -------- + The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal + to the number of connected components of G. + + >>> G = nx.Graph() # Create a graph with 5 nodes and 3 connected components + >>> G.add_nodes_from(range(5)) + >>> G.add_edges_from([(0, 2), (3, 4)]) + >>> nx.laplacian_spectrum(G) + array([0., 0., 0., 2., 2.]) + + """ + import scipy as sp + + return sp.linalg.eigvalsh(nx.laplacian_matrix(G, weight=weight).todense()) + + +@nx._dispatch(edge_attrs="weight") +def normalized_laplacian_spectrum(G, weight="weight"): + """Return eigenvalues of the normalized Laplacian of G + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See to_numpy_array for other options. + + See Also + -------- + normalized_laplacian_matrix + """ + import scipy as sp + + return sp.linalg.eigvalsh( + nx.normalized_laplacian_matrix(G, weight=weight).todense() + ) + + +@nx._dispatch(edge_attrs="weight") +def adjacency_spectrum(G, weight="weight"): + """Returns eigenvalues of the adjacency matrix of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + weight : string or None, optional (default='weight') + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. 
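An illustrative check for ``adjacency_spectrum``: the 3-path has adjacency eigenvalues -sqrt(2), 0, and sqrt(2). The underlying eigvals call returns complex values in no guaranteed order, so the sketch compares against the sorted real parts:

    >>> import numpy as np
    >>> ev = nx.adjacency_spectrum(nx.path_graph(3))   # complex dtype
    >>> np.allclose(sorted(ev.real), [-np.sqrt(2), 0, np.sqrt(2)])
    True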
+ + Returns + ------- + evals : NumPy array + Eigenvalues + + Notes + ----- + For MultiGraph/MultiDiGraph, the edges weights are summed. + See to_numpy_array for other options. + + See Also + -------- + adjacency_matrix + """ + import scipy as sp + + return sp.linalg.eigvals(nx.adjacency_matrix(G, weight=weight).todense()) + + +@nx._dispatch +def modularity_spectrum(G): + """Returns eigenvalues of the modularity matrix of G. + + Parameters + ---------- + G : Graph + A NetworkX Graph or DiGraph + + Returns + ------- + evals : NumPy array + Eigenvalues + + See Also + -------- + modularity_matrix + + References + ---------- + .. [1] M. E. J. Newman, "Modularity and community structure in networks", + Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006. + """ + import scipy as sp + + if G.is_directed(): + return sp.linalg.eigvals(nx.directed_modularity_matrix(G)) + else: + return sp.linalg.eigvals(nx.modularity_matrix(G)) + + +@nx._dispatch +def bethe_hessian_spectrum(G, r=None): + """Returns eigenvalues of the Bethe Hessian matrix of G. + + Parameters + ---------- + G : Graph + A NetworkX Graph or DiGraph + + r : float + Regularizer parameter + + Returns + ------- + evals : NumPy array + Eigenvalues + + See Also + -------- + bethe_hessian_matrix + + References + ---------- + .. [1] A. Saade, F. Krzakala and L. Zdeborová + "Spectral clustering of graphs with the bethe hessian", + Advances in Neural Information Processing Systems. 2014. + """ + import scipy as sp + + return sp.linalg.eigvalsh(nx.bethe_hessian_matrix(G, r).todense()) diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__init__.py b/phivenv/Lib/site-packages/networkx/linalg/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..877e81266e1ab926114cbd0373a0c131508fd99e Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c56c15f79fba876404d219219c5943b6cef9a99 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d5057f1477ada039f2a3c1cb584e410547633b1 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2b7df6fe5da1e3a8a7c5048070d32c8198e2d5b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_bethehessian.cpython-39.pyc 
differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89b23eb4680837ddce835cedf4760e8cb6df1973 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fd799398bdef789bace7341633c9466a1e2aaaa Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_laplacian.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83093e7d2104e73b0be4d4373c26b0ab702869c5 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_modularity.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01cde42dca57484d2a6da3f1a96ef2b281c62cd4 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/linalg/tests/__pycache__/test_spectrum.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..089d917a6832e1ab211eb3ced08344b84ddb285a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_algebraic_connectivity.py @@ -0,0 +1,402 @@ +from math import sqrt + +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + +methods = ("tracemin_pcg", "tracemin_lu", "lanczos", "lobpcg") + + +def test_algebraic_connectivity_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.algebraic_connectivity(G, method="tracemin_chol") + + +def test_fiedler_vector_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.fiedler_vector(G, method="tracemin_chol") + + +def test_spectral_ordering_tracemin_chol(): + """Test that "tracemin_chol" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + with pytest.raises(nx.NetworkXError): + nx.spectral_ordering(G, method="tracemin_chol") + + +def test_fiedler_vector_tracemin_unknown(): + """Test that "tracemin_unknown" raises an exception.""" + pytest.importorskip("scipy") + G = nx.barbell_graph(5, 4) + L = nx.laplacian_matrix(G) + X = np.asarray(np.random.normal(size=(1, L.shape[0]))).T + with pytest.raises(nx.NetworkXError, match="Unknown linear system solver"): + nx.linalg.algebraicconnectivity._tracemin_fiedler( + L, X, normalized=False, tol=1e-8, method="tracemin_unknown" + ) + + +def 
test_spectral_bisection(): + pytest.importorskip("scipy") + G = nx.barbell_graph(3, 0) + C = nx.spectral_bisection(G) + assert C == ({0, 1, 2}, {3, 4, 5}) + + mapping = dict(enumerate("badfec")) + G = nx.relabel_nodes(G, mapping) + C = nx.spectral_bisection(G) + assert C == ( + {mapping[0], mapping[1], mapping[2]}, + {mapping[3], mapping[4], mapping[5]}, + ) + + +def check_eigenvector(A, l, x): + nx = np.linalg.norm(x) + # Check zeroness. + assert nx != pytest.approx(0, abs=1e-07) + y = A @ x + ny = np.linalg.norm(y) + # Check collinearity. + assert x @ y == pytest.approx(nx * ny, abs=1e-7) + # Check eigenvalue. + assert ny == pytest.approx(l * nx, abs=1e-7) + + +class TestAlgebraicConnectivity: + @pytest.mark.parametrize("method", methods) + def test_directed(self, method): + G = nx.DiGraph() + pytest.raises( + nx.NetworkXNotImplemented, nx.algebraic_connectivity, G, method=method + ) + pytest.raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G, method=method) + + @pytest.mark.parametrize("method", methods) + def test_null_and_singleton(self, method): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method) + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + G.add_edge(0, 0) + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method=method) + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + + @pytest.mark.parametrize("method", methods) + def test_disconnected(self, method): + G = nx.Graph() + G.add_nodes_from(range(2)) + assert nx.algebraic_connectivity(G) == 0 + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + G.add_edge(0, 1, weight=0) + assert nx.algebraic_connectivity(G) == 0 + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method=method) + + def test_unrecognized_method(self): + pytest.importorskip("scipy") + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, nx.algebraic_connectivity, G, method="unknown") + pytest.raises(nx.NetworkXError, nx.fiedler_vector, G, method="unknown") + + @pytest.mark.parametrize("method", methods) + def test_two_nodes(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_edge(0, 1, weight=1) + A = nx.laplacian_matrix(G) + assert nx.algebraic_connectivity(G, tol=1e-12, method=method) == pytest.approx( + 2, abs=1e-7 + ) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, 2, x) + + @pytest.mark.parametrize("method", methods) + def test_two_nodes_multigraph(self, method): + pytest.importorskip("scipy") + G = nx.MultiGraph() + G.add_edge(0, 0, spam=1e8) + G.add_edge(0, 1, spam=1) + G.add_edge(0, 1, spam=-2) + A = -3 * nx.laplacian_matrix(G, weight="spam") + assert nx.algebraic_connectivity( + G, weight="spam", tol=1e-12, method=method + ) == pytest.approx(6, abs=1e-7) + x = nx.fiedler_vector(G, weight="spam", tol=1e-12, method=method) + check_eigenvector(A, 6, x) + + def test_abbreviation_of_method(self): + pytest.importorskip("scipy") + G = nx.path_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2 + sqrt(2)) + ac = nx.algebraic_connectivity(G, tol=1e-12, method="tracemin") + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method="tracemin") + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_path(self, method): + pytest.importorskip("scipy") + G = nx.path_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2 + sqrt(2)) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == 
pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_problematic_graph_issue_2381(self, method): + pytest.importorskip("scipy") + G = nx.path_graph(4) + G.add_edges_from([(4, 2), (5, 1)]) + A = nx.laplacian_matrix(G) + sigma = 0.438447187191 + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_cycle(self, method): + pytest.importorskip("scipy") + G = nx.cycle_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize("method", methods) + def test_seed_argument(self, method): + pytest.importorskip("scipy") + G = nx.cycle_graph(8) + A = nx.laplacian_matrix(G) + sigma = 2 - sqrt(2) + ac = nx.algebraic_connectivity(G, tol=1e-12, method=method, seed=1) + assert ac == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, tol=1e-12, method=method, seed=1) + check_eigenvector(A, sigma, x) + + @pytest.mark.parametrize( + ("normalized", "sigma", "laplacian_fn"), + ( + (False, 0.2434017461399311, nx.laplacian_matrix), + (True, 0.08113391537997749, nx.normalized_laplacian_matrix), + ), + ) + @pytest.mark.parametrize("method", methods) + def test_buckminsterfullerene(self, normalized, sigma, laplacian_fn, method): + pytest.importorskip("scipy") + G = nx.Graph( + [ + (1, 10), + (1, 41), + (1, 59), + (2, 12), + (2, 42), + (2, 60), + (3, 6), + (3, 43), + (3, 57), + (4, 8), + (4, 44), + (4, 58), + (5, 13), + (5, 56), + (5, 57), + (6, 10), + (6, 31), + (7, 14), + (7, 56), + (7, 58), + (8, 12), + (8, 32), + (9, 23), + (9, 53), + (9, 59), + (10, 15), + (11, 24), + (11, 53), + (11, 60), + (12, 16), + (13, 14), + (13, 25), + (14, 26), + (15, 27), + (15, 49), + (16, 28), + (16, 50), + (17, 18), + (17, 19), + (17, 54), + (18, 20), + (18, 55), + (19, 23), + (19, 41), + (20, 24), + (20, 42), + (21, 31), + (21, 33), + (21, 57), + (22, 32), + (22, 34), + (22, 58), + (23, 24), + (25, 35), + (25, 43), + (26, 36), + (26, 44), + (27, 51), + (27, 59), + (28, 52), + (28, 60), + (29, 33), + (29, 34), + (29, 56), + (30, 51), + (30, 52), + (30, 53), + (31, 47), + (32, 48), + (33, 45), + (34, 46), + (35, 36), + (35, 37), + (36, 38), + (37, 39), + (37, 49), + (38, 40), + (38, 50), + (39, 40), + (39, 51), + (40, 52), + (41, 47), + (42, 48), + (43, 49), + (44, 50), + (45, 46), + (45, 54), + (46, 55), + (47, 54), + (48, 55), + ] + ) + A = laplacian_fn(G) + try: + assert nx.algebraic_connectivity( + G, normalized=normalized, tol=1e-12, method=method + ) == pytest.approx(sigma, abs=1e-7) + x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12, method=method) + check_eigenvector(A, sigma, x) + except nx.NetworkXError as err: + if err.args not in ( + ("Cholesky solver unavailable.",), + ("LU solver unavailable.",), + ): + raise + + +class TestSpectralOrdering: + _graphs = (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) + + @pytest.mark.parametrize("graph", _graphs) + def test_nullgraph(self, graph): + G = graph() + pytest.raises(nx.NetworkXError, nx.spectral_ordering, G) + + @pytest.mark.parametrize("graph", _graphs) + def test_singleton(self, graph): + G = graph() + G.add_node("x") 
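# Annotation (not in the original file): components of size <= 2 skip the
# Fiedler computation in spectral_ordering and are emitted as-is, so a lone
# node is its own ordering; the self-loops added next must not change that.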
+ assert nx.spectral_ordering(G) == ["x"] + G.add_edge("x", "x", weight=33) + G.add_edge("x", "x", weight=33) + assert nx.spectral_ordering(G) == ["x"] + + def test_unrecognized_method(self): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, nx.spectral_ordering, G, method="unknown") + + @pytest.mark.parametrize("method", methods) + def test_three_nodes(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)], weight="spam") + order = nx.spectral_ordering(G, weight="spam", method=method) + assert set(order) == set(G) + assert {1, 3} in (set(order[:-1]), set(order[1:])) + + @pytest.mark.parametrize("method", methods) + def test_three_nodes_multigraph(self, method): + pytest.importorskip("scipy") + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)]) + order = nx.spectral_ordering(G, method=method) + assert set(order) == set(G) + assert {2, 3} in (set(order[:-1]), set(order[1:])) + + @pytest.mark.parametrize("method", methods) + def test_path(self, method): + pytest.importorskip("scipy") + path = list(range(10)) + np.random.shuffle(path) + G = nx.Graph() + nx.add_path(G, path) + order = nx.spectral_ordering(G, method=method) + assert order in [path, list(reversed(path))] + + @pytest.mark.parametrize("method", methods) + def test_seed_argument(self, method): + pytest.importorskip("scipy") + path = list(range(10)) + np.random.shuffle(path) + G = nx.Graph() + nx.add_path(G, path) + order = nx.spectral_ordering(G, method=method, seed=1) + assert order in [path, list(reversed(path))] + + @pytest.mark.parametrize("method", methods) + def test_disconnected(self, method): + pytest.importorskip("scipy") + G = nx.Graph() + nx.add_path(G, range(0, 10, 2)) + nx.add_path(G, range(1, 10, 2)) + order = nx.spectral_ordering(G, method=method) + assert set(order) == set(G) + seqs = [ + list(range(0, 10, 2)), + list(range(8, -1, -2)), + list(range(1, 10, 2)), + list(range(9, -1, -2)), + ] + assert order[:5] in seqs + assert order[5:] in seqs + + @pytest.mark.parametrize( + ("normalized", "expected_order"), + ( + (False, [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8], [8, 7, 9, 6, 5, 4, 3, 0, 2, 1]]), + (True, [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8], [8, 7, 6, 9, 5, 4, 0, 3, 2, 1]]), + ), + ) + @pytest.mark.parametrize("method", methods) + def test_cycle(self, normalized, expected_order, method): + pytest.importorskip("scipy") + path = list(range(10)) + G = nx.Graph() + nx.add_path(G, path, weight=5) + G.add_edge(path[-1], path[0], weight=1) + A = nx.laplacian_matrix(G).todense() + order = nx.spectral_ordering(G, normalized=normalized, method=method) + assert order in expected_order diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_attrmatrix.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_attrmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..01574bb3b8f284edef6c7f92fe1c7e7a239e0610 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_attrmatrix.py @@ -0,0 +1,108 @@ +import pytest + +np = pytest.importorskip("numpy") + +import networkx as nx + + +def test_attr_matrix(): + G = nx.Graph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + + def node_attr(u): + return G.nodes[u].get("size", 0.5) * 3 + + def edge_attr(u, v): + return G[u][v].get("thickness", 0.5) + + M = nx.attr_matrix(G, edge_attr=edge_attr, node_attr=node_attr) + 
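# Annotation (not in the original file): no node sets "size", so every node
# maps to the same node_attr value 0.5 * 3 == 1.5. The matrix therefore
# collapses to a single cell holding the summed thickness 1 + 2 + 3 == 6.0,
# and the returned ordering is the lone value [1.5].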
np.testing.assert_equal(M[0], np.array([[6.0]])) + assert M[1] == [1.5] + + +def test_attr_matrix_directed(): + G = nx.DiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 1., 1.], + [0., 0., 1.], + [0., 0., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + + +def test_attr_matrix_multigraph(): + G = nx.MultiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 3., 1.], + [3., 0., 1.], + [1., 1., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + M = nx.attr_matrix(G, edge_attr="weight", rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 9., 1.], + [9., 0., 1.], + [1., 1., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + M = nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 3., 2.], + [3., 0., 3.], + [2., 3., 0.]] + ) + # fmt: on + np.testing.assert_equal(M, np.array(data)) + + +def test_attr_sparse_matrix(): + pytest.importorskip("scipy") + G = nx.Graph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_sparse_matrix(G) + mtx = M[0] + data = np.ones((3, 3), float) + np.fill_diagonal(data, 0) + np.testing.assert_equal(mtx.todense(), np.array(data)) + assert M[1] == [0, 1, 2] + + +def test_attr_sparse_matrix_directed(): + pytest.importorskip("scipy") + G = nx.DiGraph() + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 1, thickness=1, weight=3) + G.add_edge(0, 2, thickness=2) + G.add_edge(1, 2, thickness=3) + M = nx.attr_sparse_matrix(G, rc_order=[0, 1, 2]) + # fmt: off + data = np.array( + [[0., 1., 1.], + [0., 0., 1.], + [0., 0., 0.]] + ) + # fmt: on + np.testing.assert_equal(M.todense(), np.array(data)) diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_bethehessian.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_bethehessian.py new file mode 100644 index 0000000000000000000000000000000000000000..339fe1be390b40083efdd61f1cae4ff62838fc93 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_bethehessian.py @@ -0,0 +1,41 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph + + +class TestBetheHessian: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + cls.P = nx.path_graph(3) + + def test_bethe_hessian(self): + "Bethe Hessian matrix" + # fmt: off + H = np.array([[4, -2, 0], + [-2, 5, -2], + [0, -2, 4]]) + # fmt: on + permutation = [2, 0, 1] + # Bethe Hessian gives expected form + np.testing.assert_equal(nx.bethe_hessian_matrix(self.P, r=2).todense(), H) + # nodelist is correctly implemented + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.P, r=2, nodelist=permutation).todense(), + H[np.ix_(permutation, permutation)], + ) + # Equal to Laplacian matrix when r=1 + np.testing.assert_equal( + nx.bethe_hessian_matrix(self.G, r=1).todense(), + nx.laplacian_matrix(self.G).todense(), + ) + # Correct default for the regularizer r + np.testing.assert_equal( + 
nx.bethe_hessian_matrix(self.G).todense(), + nx.bethe_hessian_matrix(self.G, r=1.25).todense(), + ) diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_graphmatrix.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_graphmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..519198bc07b32f16c1c0ae0cd9b8bbe6b81bce62 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_graphmatrix.py @@ -0,0 +1,276 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.generators.degree_seq import havel_hakimi_graph + + +def test_incidence_matrix_simple(): + deg = [3, 2, 2, 1, 0] + G = havel_hakimi_graph(deg) + deg = [(1, 0), (1, 0), (1, 0), (2, 0), (1, 0), (2, 1), (0, 1), (0, 1)] + MG = nx.random_clustered_graph(deg, seed=42) + + I = nx.incidence_matrix(G, dtype=int).todense() + # fmt: off + expected = np.array( + [[1, 1, 1, 0], + [0, 1, 0, 1], + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]] + ) + # fmt: on + np.testing.assert_equal(I, expected) + + I = nx.incidence_matrix(MG, dtype=int).todense() + # fmt: off + expected = np.array( + [[1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 0, 1]] + ) + # fmt: on + np.testing.assert_equal(I, expected) + + with pytest.raises(NetworkXError): + nx.incidence_matrix(G, nodelist=[0, 1]) + + +class TestGraphMatrix: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + # fmt: off + cls.OI = np.array( + [[-1, -1, -1, 0], + [1, 0, 0, -1], + [0, 1, 0, 1], + [0, 0, 1, 0], + [0, 0, 0, 0]] + ) + cls.A = np.array( + [[0, 1, 1, 1, 0], + [1, 0, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.WG = havel_hakimi_graph(deg) + cls.WG.add_edges_from( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + # fmt: off + cls.WA = np.array( + [[0, 0.5, 0.5, 0.5, 0], + [0.5, 0, 0.5, 0, 0], + [0.5, 0.5, 0, 0, 0], + [0.5, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.MG = nx.MultiGraph(cls.G) + cls.MG2 = cls.MG.copy() + cls.MG2.add_edge(0, 1) + # fmt: off + cls.MG2A = np.array( + [[0, 2, 1, 1, 0], + [2, 0, 1, 0, 0], + [1, 1, 0, 0, 0], + [1, 0, 0, 0, 0], + [0, 0, 0, 0, 0]] + ) + cls.MGOI = np.array( + [[-1, -1, -1, -1, 0], + [1, 1, 0, 0, -1], + [0, 0, 1, 0, 1], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 0]] + ) + # fmt: on + cls.no_edges_G = nx.Graph([(1, 2), (3, 2, {"weight": 8})]) + cls.no_edges_A = np.array([[0, 0], [0, 0]]) + + def test_incidence_matrix(self): + "Conversion to incidence matrix" + I = nx.incidence_matrix( + self.G, + nodelist=sorted(self.G), + edgelist=sorted(self.G.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.G, + nodelist=sorted(self.G), + edgelist=sorted(self.G.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + I = nx.incidence_matrix( + self.MG, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.MG, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + I = nx.incidence_matrix( + self.MG2, 
+ nodelist=sorted(self.MG2), + edgelist=sorted(self.MG2.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.MGOI) + + I = nx.incidence_matrix( + self.MG2, + nodelist=sorted(self.MG), + edgelist=sorted(self.MG2.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.MGOI)) + + I = nx.incidence_matrix(self.G, dtype=np.uint8) + assert I.dtype == np.uint8 + + def test_weighted_incidence_matrix(self): + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + dtype=int, + ).todense() + np.testing.assert_equal(I, self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=False, + dtype=int, + ).todense() + np.testing.assert_equal(I, np.abs(self.OI)) + + # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True, + # weight='weight').todense(),0.5*self.OI) + # np.testing.assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(), + # np.abs(0.5*self.OI)) + # np.testing.assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(), + # 0.3*self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + weight="weight", + ).todense() + np.testing.assert_equal(I, 0.5 * self.OI) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=False, + weight="weight", + ).todense() + np.testing.assert_equal(I, np.abs(0.5 * self.OI)) + + I = nx.incidence_matrix( + self.WG, + nodelist=sorted(self.WG), + edgelist=sorted(self.WG.edges()), + oriented=True, + weight="other", + ).todense() + np.testing.assert_equal(I, 0.3 * self.OI) + + # WMG=nx.MultiGraph(self.WG) + # WMG.add_edge(0,1,weight=0.5,other=0.3) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(), + # np.abs(0.5*self.MGOI)) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(), + # 0.5*self.MGOI) + # np.testing.assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(), + # 0.3*self.MGOI) + + WMG = nx.MultiGraph(self.WG) + WMG.add_edge(0, 1, weight=0.5, other=0.3) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=True, + weight="weight", + ).todense() + np.testing.assert_equal(I, 0.5 * self.MGOI) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=False, + weight="weight", + ).todense() + np.testing.assert_equal(I, np.abs(0.5 * self.MGOI)) + + I = nx.incidence_matrix( + WMG, + nodelist=sorted(WMG), + edgelist=sorted(WMG.edges(keys=True)), + oriented=True, + weight="other", + ).todense() + np.testing.assert_equal(I, 0.3 * self.MGOI) + + def test_adjacency_matrix(self): + "Conversion to adjacency matrix" + np.testing.assert_equal(nx.adjacency_matrix(self.G).todense(), self.A) + np.testing.assert_equal(nx.adjacency_matrix(self.MG).todense(), self.A) + np.testing.assert_equal(nx.adjacency_matrix(self.MG2).todense(), self.MG2A) + np.testing.assert_equal( + nx.adjacency_matrix(self.G, nodelist=[0, 1]).todense(), self.A[:2, :2] + ) + np.testing.assert_equal(nx.adjacency_matrix(self.WG).todense(), self.WA) + np.testing.assert_equal( + nx.adjacency_matrix(self.WG, weight=None).todense(), self.A + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.MG2, weight=None).todense(), self.MG2A + ) + 
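# every WG edge stores other=0.3, i.e. 0.6 times its 0.5 weight, so the + # "other"-weighted adjacency below equals 0.6 * self.WA +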
np.testing.assert_equal( + nx.adjacency_matrix(self.WG, weight="other").todense(), 0.6 * self.WA + ) + np.testing.assert_equal( + nx.adjacency_matrix(self.no_edges_G, nodelist=[1, 3]).todense(), + self.no_edges_A, + ) diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_laplacian.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_laplacian.py new file mode 100644 index 0000000000000000000000000000000000000000..78cddacd3bd9ea780fa3afc04746bc7540513268 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_laplacian.py @@ -0,0 +1,242 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph +from networkx.generators.expanders import margulis_gabber_galil_graph + + +class TestLaplacian: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + cls.WG = nx.Graph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + cls.WG.add_node(4) + cls.MG = nx.MultiGraph(cls.G) + + # Graph with self-loops + cls.Gsl = cls.G.copy() + for node in cls.Gsl.nodes(): + cls.Gsl.add_edge(node, node) + + def test_laplacian(self): + "Graph Laplacian" + # fmt: off + NL = np.array([[ 3, -1, -1, -1, 0], + [-1, 2, -1, 0, 0], + [-1, -1, 2, 0, 0], + [-1, 0, 0, 1, 0], + [ 0, 0, 0, 0, 0]]) + # fmt: on + WL = 0.5 * NL + OL = 0.3 * NL + np.testing.assert_equal(nx.laplacian_matrix(self.G).todense(), NL) + np.testing.assert_equal(nx.laplacian_matrix(self.MG).todense(), NL) + np.testing.assert_equal( + nx.laplacian_matrix(self.G, nodelist=[0, 1]).todense(), + np.array([[1, -1], [-1, 1]]), + ) + np.testing.assert_equal(nx.laplacian_matrix(self.WG).todense(), WL) + np.testing.assert_equal(nx.laplacian_matrix(self.WG, weight=None).todense(), NL) + np.testing.assert_equal( + nx.laplacian_matrix(self.WG, weight="other").todense(), OL + ) + + def test_normalized_laplacian(self): + "Normalized Graph Laplacian" + # fmt: off + G = np.array([[ 1. , -0.408, -0.408, -0.577, 0.], + [-0.408, 1. , -0.5 , 0. , 0.], + [-0.408, -0.5 , 1. , 0. , 0.], + [-0.577, 0. , 0. , 1. , 0.], + [ 0. , 0. , 0. , 0. , 0.]]) + GL = np.array([[ 1. , -0.408, -0.408, -0.577, 0. ], + [-0.408, 1. , -0.5 , 0. , 0. ], + [-0.408, -0.5 , 1. , 0. , 0. ], + [-0.577, 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 0. , 0. ]]) + Lsl = np.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0. ], + [-0.2887, 0.6667, -0.3333, 0. , 0. ], + [-0.2887, -0.3333, 0.6667, 0. , 0. ], + [-0.3536, 0. , 0. , 0.5 , 0. ], + [ 0. , 0. , 0. , 0. , 0. ]]) + # fmt: on + + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.G, nodelist=range(5)).todense(), + G, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.G).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.MG).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.WG).todense(), GL, decimal=3 + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.WG, weight="other").todense(), + GL, + decimal=3, + ) + np.testing.assert_almost_equal( + nx.normalized_laplacian_matrix(self.Gsl).todense(), Lsl, decimal=3 + ) + + +def test_directed_laplacian(): + "Directed Laplacian" + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond".
The graph contains dangling nodes, so + # the pagerank random walk is selected by directed_laplacian + G = nx.DiGraph() + G.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + # fmt: off + GL = np.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261], + [-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554], + [-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251], + [-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675], + [-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078], + [-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make the graph strongly connected, so we can use a random and lazy walk + G.add_edges_from(((2, 5), (6, 1))) + # fmt: off + GL = np.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227], + [-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ], + [-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ], + [ 0. , 0. , 0. , 1. , -0.5 , -0.5 ], + [ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ], + [-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]]) + # fmt: on + L = nx.directed_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="random" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # fmt: off + GL = np.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614], + [-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ], + [-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ], + [ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ], + [ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ], + [-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type="lazy") + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make a strongly connected periodic graph + G = nx.DiGraph() + G.add_edges_from(((1, 2), (2, 4), (4, 1), (1, 3), (3, 4))) + # fmt: off + GL = np.array([[ 0.5 , -0.176, -0.176, -0.25 ], + [-0.176, 0.5 , 0. , -0.176], + [-0.176, 0. , 0.5 , -0.176], + [-0.25 , -0.176, -0.176, 0.5 ]]) + # fmt: on + L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + +def test_directed_combinatorial_laplacian(): + "Directed combinatorial Laplacian" + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". The graph contains dangling nodes, so + # the pagerank random walk is selected by directed_laplacian + G = nx.DiGraph() + G.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + # fmt: off + GL = np.array([[ 0.0366, -0.0132, -0.0153, -0.0034, -0.0020, -0.0027], + [-0.0132, 0.0450, -0.0111, -0.0076, -0.0062, -0.0069], + [-0.0153, -0.0111, 0.0408, -0.0035, -0.0083, -0.0027], + [-0.0034, -0.0076, -0.0035, 0.3688, -0.1356, -0.2187], + [-0.0020, -0.0062, -0.0083, -0.1356, 0.2026, -0.0505], + [-0.0027, -0.0069, -0.0027, -0.2187, -0.0505, 0.2815]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G)) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # Make the graph strongly connected, so we can use a random and lazy walk + G.add_edges_from(((2, 5), (6, 1))) + + # fmt: off + GL = np.array([[ 0.1395, -0.0349, -0.0465, 0. , 0. , -0.0581], + [-0.0349, 0.093 , -0.0116, 0. , -0.0465, 0. ], + [-0.0465, -0.0116, 0.0698, 0. , -0.0116, 0. ], + [ 0. , 0. , 0. , 0.2326, -0.1163, -0.1163], + [ 0. 
, -0.0465, -0.0116, -0.1163, 0.2326, -0.0581], + [-0.0581, 0. , 0. , -0.1163, -0.0581, 0.2326]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="random" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + # fmt: off + GL = np.array([[ 0.0698, -0.0174, -0.0233, 0. , 0. , -0.0291], + [-0.0174, 0.0465, -0.0058, 0. , -0.0233, 0. ], + [-0.0233, -0.0058, 0.0349, 0. , -0.0058, 0. ], + [ 0. , 0. , 0. , 0.1163, -0.0581, -0.0581], + [ 0. , -0.0233, -0.0058, -0.0581, 0.1163, -0.0291], + [-0.0291, 0. , 0. , -0.0581, -0.0291, 0.1163]]) + # fmt: on + + L = nx.directed_combinatorial_laplacian_matrix( + G, alpha=0.9, nodelist=sorted(G), walk_type="lazy" + ) + np.testing.assert_almost_equal(L, GL, decimal=3) + + E = nx.DiGraph(margulis_gabber_galil_graph(2)) + L = nx.directed_combinatorial_laplacian_matrix(E) + # fmt: off + expected = np.array( + [[ 0.16666667, -0.08333333, -0.08333333, 0. ], + [-0.08333333, 0.16666667, 0. , -0.08333333], + [-0.08333333, 0. , 0.16666667, -0.08333333], + [ 0. , -0.08333333, -0.08333333, 0.16666667]] + ) + # fmt: on + np.testing.assert_almost_equal(L, expected, decimal=6) + + with pytest.raises(nx.NetworkXError): + nx.directed_combinatorial_laplacian_matrix(G, walk_type="pagerank", alpha=100) + with pytest.raises(nx.NetworkXError): + nx.directed_combinatorial_laplacian_matrix(G, walk_type="silly") diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_modularity.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_modularity.py new file mode 100644 index 0000000000000000000000000000000000000000..9f94ff4db33a427fa2f0ef51470bc1c57c8b8682 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_modularity.py @@ -0,0 +1,87 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph + + +class TestModularity: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + # Graph used as an example in Sec. 4.1 of Langville and Meyer, + # "Google's PageRank and Beyond". 
(Used for test_directed_laplacian) + cls.DG = nx.DiGraph() + cls.DG.add_edges_from( + ( + (1, 2), + (1, 3), + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ) + ) + + def test_modularity(self): + "Modularity matrix" + # fmt: off + B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.], + [0.25, -0.5, 0.5, -0.25, 0.], + [0.25, 0.5, -0.5, -0.25, 0.], + [0.625, -0.25, -0.25, -0.125, 0.], + [0., 0., 0., 0., 0.]]) + # fmt: on + + permutation = [4, 0, 1, 2, 3] + np.testing.assert_equal(nx.modularity_matrix(self.G), B) + np.testing.assert_equal( + nx.modularity_matrix(self.G, nodelist=permutation), + B[np.ix_(permutation, permutation)], + ) + + def test_modularity_weight(self): + "Modularity matrix with weights" + # fmt: off + B = np.array([[-1.125, 0.25, 0.25, 0.625, 0.], + [0.25, -0.5, 0.5, -0.25, 0.], + [0.25, 0.5, -0.5, -0.25, 0.], + [0.625, -0.25, -0.25, -0.125, 0.], + [0., 0., 0., 0., 0.]]) + # fmt: on + + G_weighted = self.G.copy() + for n1, n2 in G_weighted.edges(): + G_weighted.edges[n1, n2]["weight"] = 0.5 + # The following test would fail in networkx 1.1 + np.testing.assert_equal(nx.modularity_matrix(G_weighted), B) + # The following tests that the modularity matrix gets rescaled accordingly + np.testing.assert_equal( + nx.modularity_matrix(G_weighted, weight="weight"), 0.5 * B + ) + + def test_directed_modularity(self): + "Directed Modularity matrix" + # fmt: off + B = np.array([[-0.2, 0.6, 0.8, -0.4, -0.4, -0.4], + [0., 0., 0., 0., 0., 0.], + [0.7, 0.4, -0.3, -0.6, 0.4, -0.6], + [-0.2, -0.4, -0.2, -0.4, 0.6, 0.6], + [-0.2, -0.4, -0.2, 0.6, -0.4, 0.6], + [-0.1, -0.2, -0.1, 0.8, -0.2, -0.2]]) + # fmt: on + node_permutation = [5, 1, 2, 3, 4, 6] + idx_permutation = [4, 0, 1, 2, 3, 5] + mm = nx.directed_modularity_matrix(self.DG, nodelist=sorted(self.DG)) + np.testing.assert_equal(mm, B) + np.testing.assert_equal( + nx.directed_modularity_matrix(self.DG, nodelist=node_permutation), + B[np.ix_(idx_permutation, idx_permutation)], + ) diff --git a/phivenv/Lib/site-packages/networkx/linalg/tests/test_spectrum.py b/phivenv/Lib/site-packages/networkx/linalg/tests/test_spectrum.py new file mode 100644 index 0000000000000000000000000000000000000000..e9101303cba60c56825101fa5762b56a3083e7af --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/linalg/tests/test_spectrum.py @@ -0,0 +1,71 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.degree_seq import havel_hakimi_graph + + +class TestSpectrum: + @classmethod + def setup_class(cls): + deg = [3, 2, 2, 1, 0] + cls.G = havel_hakimi_graph(deg) + cls.P = nx.path_graph(3) + cls.WG = nx.Graph( + (u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges() + ) + cls.WG.add_node(4) + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, [0, 1, 2]) + + def test_laplacian_spectrum(self): + "Laplacian eigenvalues" + evals = np.array([0, 0, 1, 3, 4]) + e = sorted(nx.laplacian_spectrum(self.G)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.laplacian_spectrum(self.WG, weight=None)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.laplacian_spectrum(self.WG)) + np.testing.assert_almost_equal(e, 0.5 * evals) + e = sorted(nx.laplacian_spectrum(self.WG, weight="other")) + np.testing.assert_almost_equal(e, 0.3 * evals) + + def test_normalized_laplacian_spectrum(self): + "Normalized Laplacian eigenvalues" + evals = np.array([0, 0, 0.7712864461218, 1.5, 1.7287135538781]) + e = sorted(nx.normalized_laplacian_spectrum(self.G)) +
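# the normalized Laplacian is invariant under uniform edge-weight scaling, + # so the 0.5- and 0.3-weighted variants below share this same spectrum +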
np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight=None)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG)) + np.testing.assert_almost_equal(e, evals) + e = sorted(nx.normalized_laplacian_spectrum(self.WG, weight="other")) + np.testing.assert_almost_equal(e, evals) + + def test_adjacency_spectrum(self): + "Adjacency eigenvalues" + evals = np.array([-np.sqrt(2), 0, np.sqrt(2)]) + e = sorted(nx.adjacency_spectrum(self.P)) + np.testing.assert_almost_equal(e, evals) + + def test_modularity_spectrum(self): + "Modularity eigenvalues" + evals = np.array([-1.5, 0.0, 0.0]) + e = sorted(nx.modularity_spectrum(self.P)) + np.testing.assert_almost_equal(e, evals) + # Directed modularity eigenvalues + evals = np.array([-0.5, 0.0, 0.0]) + e = sorted(nx.modularity_spectrum(self.DG)) + np.testing.assert_almost_equal(e, evals) + + def test_bethe_hessian_spectrum(self): + "Bethe Hessian eigenvalues" + evals = np.array([0.5 * (9 - np.sqrt(33)), 4, 0.5 * (9 + np.sqrt(33))]) + e = sorted(nx.bethe_hessian_spectrum(self.P, r=2)) + np.testing.assert_almost_equal(e, evals) + # Collapses back to Laplacian: + e1 = sorted(nx.bethe_hessian_spectrum(self.P, r=1)) + e2 = sorted(nx.laplacian_spectrum(self.P)) + np.testing.assert_almost_equal(e1, e2) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__init__.py b/phivenv/Lib/site-packages/networkx/readwrite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f65509824a10732435057c0fdb3ed0d90636b63c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/__init__.py @@ -0,0 +1,18 @@ +""" +A package for reading and writing graphs in various formats. + +""" + + +from networkx.readwrite.adjlist import * +from networkx.readwrite.multiline_adjlist import * +from networkx.readwrite.edgelist import * +from networkx.readwrite.pajek import * +from networkx.readwrite.leda import * +from networkx.readwrite.sparse6 import * +from networkx.readwrite.graph6 import * +from networkx.readwrite.gml import * +from networkx.readwrite.graphml import * +from networkx.readwrite.gexf import * +from networkx.readwrite.json_graph import * +from networkx.readwrite.text import * diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7541acc6eed8e655f57346633df7d728dbc84d53 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86de0b52212c1f4de5ffeab0eda710f5d15e55ea Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/adjlist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..722f0a211f2b06b1014bbbaa533522015efbc614 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/edgelist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/gexf.cpython-39.pyc 
b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/gexf.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2e93e08576b90f86aca105aece3562853bf2258 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/gexf.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/gml.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/gml.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd4fcafba6cc863b78bbb9f8c00e0497543d2d55 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/gml.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/graph6.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/graph6.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..101550e5afd82d43c983e877f80d5535fc4f8903 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/graph6.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/graphml.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/graphml.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f69f5723b34cdc58a37c7bfe790ae0c2fcb9d49 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/graphml.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/leda.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/leda.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a13d39aa17744482412cf50f4c76d7d5aea635d9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/leda.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d31fe2c3e221b81ff7e41d13a3c49a534e5115e Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/multiline_adjlist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/p2g.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/p2g.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd3a1493c53618c918f97e253020021ccda2b76a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/p2g.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/pajek.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/pajek.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8811d817e10788c4fb606794f7a9c6d7a5db93d4 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/pajek.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0aee7995e756e1f0bcde0979046ef87df8b62fc Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/sparse6.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/text.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/text.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e6fefeac861e4dfcd62d04fd5c90a9fae857886 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/__pycache__/text.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/adjlist.py b/phivenv/Lib/site-packages/networkx/readwrite/adjlist.py new file mode 100644 index 0000000000000000000000000000000000000000..20933695723411e4c480aa329dc9239350cd1504 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/adjlist.py @@ -0,0 +1,310 @@ +""" +************** +Adjacency List +************** +Read and write NetworkX graphs as adjacency lists. + +Adjacency list format is useful for graphs without data associated +with nodes or edges and for nodes that can be meaningfully represented +as strings. + +Format +------ +The adjacency list format consists of lines with node labels. The +first label in a line is the source node. Further labels in the line +are considered target nodes and are added to the graph along with an edge +between the source node and target node. + +The graph with edges a-b, a-c, d-e can be represented as the following +adjacency list (anything following the # in a line is a comment):: + + a b c # source target target + d e +""" + +__all__ = ["generate_adjlist", "write_adjlist", "parse_adjlist", "read_adjlist"] + +import networkx as nx +from networkx.utils import open_file + + +def generate_adjlist(G, delimiter=" "): + """Generate a single line of the graph G in adjacency list format. + + Parameters + ---------- + G : NetworkX graph + + delimiter : string, optional + Separator for node labels + + Returns + ------- + lines : string + Lines of data in adjlist format. + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> for line in nx.generate_adjlist(G): + ... print(line) + 0 1 2 3 + 1 2 3 + 2 3 + 3 4 + 4 5 + 5 6 + 6 + + See Also + -------- + write_adjlist, read_adjlist + + Notes + ----- + The default `delimiter=" "` will result in unexpected results if node names contain + whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are + valid in node names. + + NB: This option is not available for data that isn't user-generated. + + """ + directed = G.is_directed() + seen = set() + for s, nbrs in G.adjacency(): + line = str(s) + delimiter + for t, data in nbrs.items(): + if not directed and t in seen: + continue + if G.is_multigraph(): + for d in data.values(): + line += str(t) + delimiter + else: + line += str(t) + delimiter + if not directed: + seen.add(s) + yield line[: -len(delimiter)] + + +@open_file(1, mode="wb") +def write_adjlist(G, path, comments="#", delimiter=" ", encoding="utf-8"): + """Write graph G in single-line adjacency-list format to path. + + + Parameters + ---------- + G : NetworkX graph + + path : string or file + Filename or file handle for data output. + Filenames ending in .gz or .bz2 will be compressed. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels + + encoding : string, optional + Text encoding. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_adjlist(G, "test.adjlist") + + The path can be a filehandle or a string with the name of the file. If a + filehandle is provided, it has to be opened in 'wb' mode. 
+ + >>> fh = open("test.adjlist", "wb") + >>> nx.write_adjlist(G, fh) + + Notes + ----- + The default `delimiter=" "` will result in unexpected results if node names contain + whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are + valid in node names. + NB: This option is not available for data that isn't user-generated. + + This format does not store graph, node, or edge data. + + See Also + -------- + read_adjlist, generate_adjlist + """ + import sys + import time + + pargs = comments + " ".join(sys.argv) + "\n" + header = ( + pargs + + comments + + f" GMT {time.asctime(time.gmtime())}\n" + + comments + + f" {G.name}\n" + ) + path.write(header.encode(encoding)) + + for line in generate_adjlist(G, delimiter): + line += "\n" + path.write(line.encode(encoding)) + + +@nx._dispatch(graphs=None) +def parse_adjlist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None +): + """Parse lines of a graph adjacency list representation. + + Parameters + ---------- + lines : list or iterator of strings + Input data in adjlist format + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in adjacency list format. + + Examples + -------- + >>> lines = ["1 2 5", "2 3 4", "3 5", "4", "5"] + >>> G = nx.parse_adjlist(lines, nodetype=int) + >>> nodes = [1, 2, 3, 4, 5] + >>> all(node in G for node in nodes) + True + >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)] + >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges) + True + + See Also + -------- + read_adjlist + + """ + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not len(line): + continue + vlist = line.strip().split(delimiter) + u = vlist.pop(0) + # convert types + if nodetype is not None: + try: + u = nodetype(u) + except BaseException as err: + raise TypeError( + f"Failed to convert node ({u}) to type " f"{nodetype}" + ) from err + G.add_node(u) + if nodetype is not None: + try: + vlist = list(map(nodetype, vlist)) + except BaseException as err: + raise TypeError( + f"Failed to convert nodes ({','.join(vlist)}) to type {nodetype}" + ) from err + G.add_edges_from([(u, v) for v in vlist]) + return G + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_adjlist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + encoding="utf-8", +): + """Read graph in adjacency list format from path. + + Parameters + ---------- + path : string or file + Filename or file handle to read. + Filenames ending in .gz or .bz2 will be uncompressed. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels. The default is whitespace. + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in adjacency list format. 
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_adjlist(G, "test.adjlist") + >>> G = nx.read_adjlist("test.adjlist") + + The path can be a filehandle or a string with the name of the file. If a + filehandle is provided, it has to be opened in 'rb' mode. + + >>> fh = open("test.adjlist", "rb") + >>> G = nx.read_adjlist(fh) + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_adjlist(G, "test.adjlist.gz") + >>> G = nx.read_adjlist("test.adjlist.gz") + + The optional nodetype is a function to convert node strings to nodetype. + + For example + + >>> G = nx.read_adjlist("test.adjlist", nodetype=int) + + will attempt to convert all nodes to integer type. + + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + + The optional create_using parameter indicates the type of NetworkX graph + created. The default is `nx.Graph`, an undirected graph. + To read the data as a directed graph use + + >>> G = nx.read_adjlist("test.adjlist", create_using=nx.DiGraph) + + Notes + ----- + This format does not store graph or node data. + + See Also + -------- + write_adjlist + """ + lines = (line.decode(encoding) for line in path) + return parse_adjlist( + lines, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + ) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/edgelist.py b/phivenv/Lib/site-packages/networkx/readwrite/edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..733b544326c0f2e12cba930b08f91f44ce1c8f09 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/edgelist.py @@ -0,0 +1,489 @@ +""" +********** +Edge Lists +********** +Read and write NetworkX graphs as edge lists. + +The multi-line adjacency list format is useful for graphs with nodes +that can be meaningfully represented as strings. With the edgelist +format simple edge data can be stored but node or graph data is not. +There is no way of representing isolated nodes unless the node has a +self-loop edge. + +Format +------ +You can read or write three formats of edge lists with these functions. + +Node pairs with no data:: + + 1 2 + +Python dictionary as data:: + + 1 2 {'weight':7, 'color':'green'} + +Arbitrary data:: + + 1 2 7 green +""" + +__all__ = [ + "generate_edgelist", + "write_edgelist", + "parse_edgelist", + "read_edgelist", + "read_weighted_edgelist", + "write_weighted_edgelist", +] + +import networkx as nx +from networkx.utils import open_file + + +def generate_edgelist(G, delimiter=" ", data=True): + """Generate a single line of the graph G in edge list format. + + Parameters + ---------- + G : NetworkX graph + + delimiter : string, optional + Separator for node labels + + data : bool or list of keys + If False generate no edge data. If True use a dictionary + representation of edge data. If a list of keys use a list of data + values corresponding to the keys. + + Returns + ------- + lines : string + Lines of data in adjlist format. + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> G[1][2]["weight"] = 3 + >>> G[3][4]["capacity"] = 12 + >>> for line in nx.generate_edgelist(G, data=False): + ... print(line) + 0 1 + 0 2 + 0 3 + 1 2 + 1 3 + 2 3 + 3 4 + 4 5 + 5 6 + + >>> for line in nx.generate_edgelist(G): + ... print(line) + 0 1 {} + 0 2 {} + 0 3 {} + 1 2 {'weight': 3} + 1 3 {} + 2 3 {} + 3 4 {'capacity': 12} + 4 5 {} + 5 6 {} + + >>> for line in nx.generate_edgelist(G, data=["weight"]): + ... 
print(line) + 0 1 + 0 2 + 0 3 + 1 2 3 + 1 3 + 2 3 + 3 4 + 4 5 + 5 6 + + See Also + -------- + write_adjlist, read_adjlist + """ + if data is True: + for u, v, d in G.edges(data=True): + e = u, v, dict(d) + yield delimiter.join(map(str, e)) + elif data is False: + for u, v in G.edges(data=False): + e = u, v + yield delimiter.join(map(str, e)) + else: + for u, v, d in G.edges(data=True): + e = [u, v] + try: + e.extend(d[k] for k in data) + except KeyError: + pass # missing data for this edge, should warn? + yield delimiter.join(map(str, e)) + + +@open_file(1, mode="wb") +def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"): + """Write graph as a list of edges. + + Parameters + ---------- + G : graph + A NetworkX graph + path : file or string + File or filename to write. If a file is provided, it must be + opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed. + comments : string, optional + The character used to indicate the start of a comment + delimiter : string, optional + The string used to separate values. The default is whitespace. + data : bool or list, optional + If False write no edge data. + If True write a string representation of the edge data dictionary. + If a list (or other iterable) is provided, write the keys specified + in the list. + encoding: string, optional + Specify which encoding to use when writing file. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_edgelist(G, "test.edgelist") + >>> G = nx.path_graph(4) + >>> fh = open("test.edgelist", "wb") + >>> nx.write_edgelist(G, fh) + >>> nx.write_edgelist(G, "test.edgelist.gz") + >>> nx.write_edgelist(G, "test.edgelist.gz", data=False) + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7, color="red") + >>> nx.write_edgelist(G, "test.edgelist", data=False) + >>> nx.write_edgelist(G, "test.edgelist", data=["color"]) + >>> nx.write_edgelist(G, "test.edgelist", data=["color", "weight"]) + + See Also + -------- + read_edgelist + write_weighted_edgelist + """ + + for line in generate_edgelist(G, delimiter, data): + line += "\n" + path.write(line.encode(encoding)) + + +@nx._dispatch(graphs=None) +def parse_edgelist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True +): + """Parse lines of an edge list representation of a graph. + + Parameters + ---------- + lines : list or iterator of strings + Input data in edgelist format + comments : string, optional + Marker for comment lines. Default is `'#'`. To specify that no character + should be treated as a comment, use ``comments=None``. + delimiter : string, optional + Separator for node labels. Default is `None`, meaning any whitespace. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + nodetype : Python type, optional + Convert nodes to this type. Default is `None`, meaning no conversion is + performed. + data : bool or list of (label,type) tuples + If `False`, generate no edge data; if `True`, use a dictionary + representation of edge data; otherwise, a list of (label, type) tuples + specifying dictionary key names and types for edge data.
+ + Returns + ------- + G: NetworkX Graph + The graph corresponding to lines + + Examples + -------- + Edgelist with no data: + + >>> lines = ["1 2", "2 3", "3 4"] + >>> G = nx.parse_edgelist(lines, nodetype=int) + >>> list(G) + [1, 2, 3, 4] + >>> list(G.edges()) + [(1, 2), (2, 3), (3, 4)] + + Edgelist with data in Python dictionary representation: + + >>> lines = ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"] + >>> G = nx.parse_edgelist(lines, nodetype=int) + >>> list(G) + [1, 2, 3, 4] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})] + + Edgelist with data in a list: + + >>> lines = ["1 2 3", "2 3 27", "3 4 3.0"] + >>> G = nx.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + >>> list(G) + [1, 2, 3, 4] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})] + + See Also + -------- + read_weighted_edgelist + """ + from ast import literal_eval + + G = nx.empty_graph(0, create_using) + for line in lines: + if comments is not None: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not line: + continue + # split line, should have 2 or more + s = line.strip().split(delimiter) + if len(s) < 2: + continue + u = s.pop(0) + v = s.pop(0) + d = s + if nodetype is not None: + try: + u = nodetype(u) + v = nodetype(v) + except Exception as err: + raise TypeError( + f"Failed to convert nodes {u},{v} to type {nodetype}." + ) from err + + if len(d) == 0 or data is False: + # no data or data type specified + edgedata = {} + elif data is True: + # no edge types specified + try: # try to evaluate as dictionary + if delimiter == ",": + edgedata_str = ",".join(d) + else: + edgedata_str = " ".join(d) + edgedata = dict(literal_eval(edgedata_str.strip())) + except Exception as err: + raise TypeError( + f"Failed to convert edge data ({d}) to dictionary." + ) from err + else: + # convert edge data to dictionary with specified keys and type + if len(d) != len(data): + raise IndexError( + f"Edge data {d} and data_keys {data} are not the same length" + ) + edgedata = {} + for (edge_key, edge_type), edge_value in zip(data, d): + try: + edge_value = edge_type(edge_value) + except Exception as err: + raise TypeError( + f"Failed to convert {edge_key} data {edge_value} " + f"to type {edge_type}." + ) from err + edgedata.update({edge_key: edge_value}) + G.add_edge(u, v, **edgedata) + return G + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + data=True, + edgetype=None, + encoding="utf-8", +): + """Read a graph from a list of edges. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be uncompressed. + comments : string, optional + The character used to indicate the start of a comment. To specify that + no character should be treated as a comment, use ``comments=None``. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+ nodetype : int, float, str, Python type, optional + Convert node data from strings to specified type + data : bool or list of (label,type) tuples + Tuples specifying dictionary key names and types for edge data + edgetype : int, float, str, Python type, optional OBSOLETE + Convert edge data from strings to specified type and use as 'weight' + encoding: string, optional + Specify which encoding to use when reading file. + + Returns + ------- + G : graph + A networkx Graph or other type specified with create_using + + Examples + -------- + >>> nx.write_edgelist(nx.path_graph(4), "test.edgelist") + >>> G = nx.read_edgelist("test.edgelist") + + >>> fh = open("test.edgelist", "rb") + >>> G = nx.read_edgelist(fh) + >>> fh.close() + + >>> G = nx.read_edgelist("test.edgelist", nodetype=int) + >>> G = nx.read_edgelist("test.edgelist", create_using=nx.DiGraph) + + Edgelist with data in a list: + + >>> textline = "1 2 3" + >>> fh = open("test.edgelist", "w") + >>> d = fh.write(textline) + >>> fh.close() + >>> G = nx.read_edgelist("test.edgelist", nodetype=int, data=(("weight", float),)) + >>> list(G) + [1, 2] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0})] + + See parse_edgelist() for more examples of formatting. + + See Also + -------- + parse_edgelist + write_edgelist + + Notes + ----- + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + """ + lines = (line if isinstance(line, str) else line.decode(encoding) for line in path) + return parse_edgelist( + lines, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + data=data, + ) + + +def write_weighted_edgelist(G, path, comments="#", delimiter=" ", encoding="utf-8"): + """Write graph G as a list of edges with numeric weights. + + Parameters + ---------- + G : graph + A NetworkX graph + path : file or string + File or filename to write. If a file is provided, it must be + opened in 'wb' mode. + Filenames ending in .gz or .bz2 will be compressed. + comments : string, optional + The character used to indicate the start of a comment + delimiter : string, optional + The string used to separate values. The default is whitespace. + encoding: string, optional + Specify which encoding to use when writing file. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7) + >>> nx.write_weighted_edgelist(G, "test.weighted.edgelist") + + See Also + -------- + read_edgelist + write_edgelist + read_weighted_edgelist + """ + write_edgelist( + G, + path, + comments=comments, + delimiter=delimiter, + data=("weight",), + encoding=encoding, + ) + + +@nx._dispatch(graphs=None) +def read_weighted_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + encoding="utf-8", +): + """Read a graph as list of edges with numeric weights. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be uncompressed. + comments : string, optional + The character used to indicate the start of a comment. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. 
+ nodetype : int, float, str, Python type, optional + Convert node data from strings to specified type + encoding: string, optional + Specify which encoding to use when reading file. + + Returns + ------- + G : graph + A networkx Graph or other type specified with create_using + + Notes + ----- + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) + + Example edgelist file format. + + With numeric edge data:: + + # read with + # >>> G=nx.read_weighted_edgelist(fh) + # source target data + a b 1 + a c 3.14159 + d e 42 + + See Also + -------- + write_weighted_edgelist + """ + return read_edgelist( + path, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + data=(("weight", float),), + encoding=encoding, + ) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/gexf.py b/phivenv/Lib/site-packages/networkx/readwrite/gexf.py new file mode 100644 index 0000000000000000000000000000000000000000..35be5568fc2349e9367617ac4f51a89ebaafd348 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/gexf.py @@ -0,0 +1,1065 @@ +"""Read and write graphs in GEXF format. + +.. warning:: + This parser uses the standard xml library present in Python, which is + insecure - see :external+python:mod:`xml` for additional information. + Only parse GEXF files you trust. + +GEXF (Graph Exchange XML Format) is a language for describing complex +network structures, their associated data and dynamics. + +This implementation does not support mixed graphs (directed and +undirected edges together). + +Format +------ +GEXF is an XML format. See http://gexf.net/schema.html for the +specification and http://gexf.net/basic.html for examples. +""" +import itertools +import time +from xml.etree.ElementTree import ( + Element, + ElementTree, + SubElement, + register_namespace, + tostring, +) + +import networkx as nx +from networkx.utils import open_file + +__all__ = ["write_gexf", "read_gexf", "relabel_gexf_graph", "generate_gexf"] + + +@open_file(1, mode="wb") +def write_gexf(G, path, encoding="utf-8", prettyprint=True, version="1.2draft"): + """Write G in GEXF format to path. + + "GEXF (Graph Exchange XML Format) is a language for describing + complex network structures, their associated data and dynamics" [1]_. + + Node attributes are checked according to the version of the GEXF + schemas used for parameters which are not user defined, + e.g. visualization 'viz' [2]_. See example for usage. + + Parameters + ---------- + G : graph + A NetworkX graph + path : file or string + File or file name to write. + File names ending in .gz or .bz2 will be compressed. + encoding : string (optional, default: 'utf-8') + Encoding for text data. + prettyprint : bool (optional, default: True) + If True use line breaks and indenting in output XML. + version: string (optional, default: '1.2draft') + The version of GEXF to be used for node attribute checking + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_gexf(G, "test.gexf") + + # visualization data + >>> G.nodes[0]["viz"] = {"size": 54} + >>> G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1} + >>> G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256} + + + Notes + ----- + This implementation does not support mixed graphs (directed and undirected + edges together). + + The node id attribute is set to be the string of the node label. + If you want to specify an id, set it as node data, e.g.
+ node['a']['id']=1 to set the id of node 'a' to 1. + + References + ---------- + .. [1] GEXF File Format, http://gexf.net/ + .. [2] GEXF schema, http://gexf.net/schema.html + """ + writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version) + writer.add_graph(G) + writer.write(path) + + +def generate_gexf(G, encoding="utf-8", prettyprint=True, version="1.2draft"): + """Generate lines of GEXF format representation of G. + + "GEXF (Graph Exchange XML Format) is a language for describing + complex network structures, their associated data and dynamics" [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + encoding : string (optional, default: 'utf-8') + Encoding for text data. + prettyprint : bool (optional, default: True) + If True use line breaks and indenting in output XML. + version : string (default: 1.2draft) + Version of GEXF File Format (see http://gexf.net/schema.html) + Supported values: "1.1draft", "1.2draft" + + + Examples + -------- + >>> G = nx.path_graph(4) + >>> linefeed = chr(10) # linefeed=\n + >>> s = linefeed.join(nx.generate_gexf(G)) + >>> for line in nx.generate_gexf(G): # doctest: +SKIP + ... print(line) + + Notes + ----- + This implementation does not support mixed graphs (directed and undirected + edges together). + + The node id attribute is set to be the string of the node label. + If you want to specify an id, set it as node data, e.g. + node['a']['id']=1 to set the id of node 'a' to 1. + + References + ---------- + .. [1] GEXF File Format, https://gephi.org/gexf/format/ + """ + writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version) + writer.add_graph(G) + yield from str(writer).splitlines() + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_gexf(path, node_type=None, relabel=False, version="1.2draft"): + """Read graph in GEXF format from path. + + "GEXF (Graph Exchange XML Format) is a language for describing + complex network structures, their associated data and dynamics" [1]_. + + Parameters + ---------- + path : file or string + File or file name to read. + File names ending in .gz or .bz2 will be decompressed. + node_type: Python type (default: None) + Convert node ids to this type if not None. + relabel : bool (default: False) + If True relabel the nodes to use the GEXF node "label" attribute + instead of the node "id" attribute as the NetworkX node label. + version : string (default: 1.2draft) + Version of GEXF File Format (see http://gexf.net/schema.html) + Supported values: "1.1draft", "1.2draft" + + Returns + ------- + graph: NetworkX graph + If no parallel edges are found a Graph or DiGraph is returned. + Otherwise a MultiGraph or MultiDiGraph is returned. + + Notes + ----- + This implementation does not support mixed graphs (directed and undirected + edges together). + + References + ---------- + ..
[1] GEXF File Format, http://gexf.net/ + """ + reader = GEXFReader(node_type=node_type, version=version) + if relabel: + G = relabel_gexf_graph(reader(path)) + else: + G = reader(path) + return G + + +class GEXF: + versions = { + "1.1draft": { + "NS_GEXF": "http://www.gexf.net/1.1draft", + "NS_VIZ": "http://www.gexf.net/1.1draft/viz", + "NS_XSI": "http://www.w3.org/2001/XMLSchema-instance", + "SCHEMALOCATION": " ".join( + [ + "http://www.gexf.net/1.1draft", + "http://www.gexf.net/1.1draft/gexf.xsd", + ] + ), + "VERSION": "1.1", + }, + "1.2draft": { + "NS_GEXF": "http://www.gexf.net/1.2draft", + "NS_VIZ": "http://www.gexf.net/1.2draft/viz", + "NS_XSI": "http://www.w3.org/2001/XMLSchema-instance", + "SCHEMALOCATION": " ".join( + [ + "http://www.gexf.net/1.2draft", + "http://www.gexf.net/1.2draft/gexf.xsd", + ] + ), + "VERSION": "1.2", + }, + } + + def construct_types(self): + types = [ + (int, "integer"), + (float, "float"), + (float, "double"), + (bool, "boolean"), + (list, "string"), + (dict, "string"), + (int, "long"), + (str, "liststring"), + (str, "anyURI"), + (str, "string"), + ] + + # These additions to types allow writing numpy types + try: + import numpy as np + except ImportError: + pass + else: + # prepend so that python types are created upon read (last entry wins) + types = [ + (np.float64, "float"), + (np.float32, "float"), + (np.float16, "float"), + (np.int_, "int"), + (np.int8, "int"), + (np.int16, "int"), + (np.int32, "int"), + (np.int64, "int"), + (np.uint8, "int"), + (np.uint16, "int"), + (np.uint32, "int"), + (np.uint64, "int"), + (np.int_, "int"), + (np.intc, "int"), + (np.intp, "int"), + ] + types + + self.xml_type = dict(types) + self.python_type = dict(reversed(a) for a in types) + + # http://www.w3.org/TR/xmlschema-2/#boolean + convert_bool = { + "true": True, + "false": False, + "True": True, + "False": False, + "0": False, + 0: False, + "1": True, + 1: True, + } + + def set_version(self, version): + d = self.versions.get(version) + if d is None: + raise nx.NetworkXError(f"Unknown GEXF version {version}.") + self.NS_GEXF = d["NS_GEXF"] + self.NS_VIZ = d["NS_VIZ"] + self.NS_XSI = d["NS_XSI"] + self.SCHEMALOCATION = d["SCHEMALOCATION"] + self.VERSION = d["VERSION"] + self.version = version + + +class GEXFWriter(GEXF): + # class for writing GEXF format files + # use write_gexf() function + def __init__( + self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft" + ): + self.construct_types() + self.prettyprint = prettyprint + self.encoding = encoding + self.set_version(version) + self.xml = Element( + "gexf", + { + "xmlns": self.NS_GEXF, + "xmlns:xsi": self.NS_XSI, + "xsi:schemaLocation": self.SCHEMALOCATION, + "version": self.VERSION, + }, + ) + + # Make meta element a non-graph element + # Also add lastmodifieddate as attribute, not tag + meta_element = Element("meta") + subelement_text = f"NetworkX {nx.__version__}" + SubElement(meta_element, "creator").text = subelement_text + meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d")) + self.xml.append(meta_element) + + register_namespace("viz", self.NS_VIZ) + + # counters for edge and attribute identifiers + self.edge_id = itertools.count() + self.attr_id = itertools.count() + self.all_edge_ids = set() + # default attributes are stored in dictionaries + self.attr = {} + self.attr["node"] = {} + self.attr["edge"] = {} + self.attr["node"]["dynamic"] = {} + self.attr["node"]["static"] = {} + self.attr["edge"]["dynamic"] = {} + self.attr["edge"]["static"] = {} + + if graph is not None: + 
self.add_graph(graph) + + def __str__(self): + if self.prettyprint: + self.indent(self.xml) + s = tostring(self.xml).decode(self.encoding) + return s + + def add_graph(self, G): + # first pass through G collecting edge ids + for u, v, dd in G.edges(data=True): + eid = dd.get("id") + if eid is not None: + self.all_edge_ids.add(str(eid)) + # set graph attributes + if G.graph.get("mode") == "dynamic": + mode = "dynamic" + else: + mode = "static" + # Add a graph element to the XML + if G.is_directed(): + default = "directed" + else: + default = "undirected" + name = G.graph.get("name", "") + graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name) + self.graph_element = graph_element + self.add_nodes(G, graph_element) + self.add_edges(G, graph_element) + self.xml.append(graph_element) + + def add_nodes(self, G, graph_element): + nodes_element = Element("nodes") + for node, data in G.nodes(data=True): + node_data = data.copy() + node_id = str(node_data.pop("id", node)) + kw = {"id": node_id} + label = str(node_data.pop("label", node)) + kw["label"] = label + try: + pid = node_data.pop("pid") + kw["pid"] = str(pid) + except KeyError: + pass + try: + start = node_data.pop("start") + kw["start"] = str(start) + self.alter_graph_mode_timeformat(start) + except KeyError: + pass + try: + end = node_data.pop("end") + kw["end"] = str(end) + self.alter_graph_mode_timeformat(end) + except KeyError: + pass + # add node element with attributes + node_element = Element("node", **kw) + # add node element and attr subelements + default = G.graph.get("node_default", {}) + node_data = self.add_parents(node_element, node_data) + if self.VERSION == "1.1": + node_data = self.add_slices(node_element, node_data) + else: + node_data = self.add_spells(node_element, node_data) + node_data = self.add_viz(node_element, node_data) + node_data = self.add_attributes("node", node_element, node_data, default) + nodes_element.append(node_element) + graph_element.append(nodes_element) + + def add_edges(self, G, graph_element): + def edge_key_data(G): + # helper function to unify multigraph and graph edge iterator + if G.is_multigraph(): + for u, v, key, data in G.edges(data=True, keys=True): + edge_data = data.copy() + edge_data.update(key=key) + edge_id = edge_data.pop("id", None) + if edge_id is None: + edge_id = next(self.edge_id) + while str(edge_id) in self.all_edge_ids: + edge_id = next(self.edge_id) + self.all_edge_ids.add(str(edge_id)) + yield u, v, edge_id, edge_data + else: + for u, v, data in G.edges(data=True): + edge_data = data.copy() + edge_id = edge_data.pop("id", None) + if edge_id is None: + edge_id = next(self.edge_id) + while str(edge_id) in self.all_edge_ids: + edge_id = next(self.edge_id) + self.all_edge_ids.add(str(edge_id)) + yield u, v, edge_id, edge_data + + edges_element = Element("edges") + for u, v, key, edge_data in edge_key_data(G): + kw = {"id": str(key)} + try: + edge_label = edge_data.pop("label") + kw["label"] = str(edge_label) + except KeyError: + pass + try: + edge_weight = edge_data.pop("weight") + kw["weight"] = str(edge_weight) + except KeyError: + pass + try: + edge_type = edge_data.pop("type") + kw["type"] = str(edge_type) + except KeyError: + pass + try: + start = edge_data.pop("start") + kw["start"] = str(start) + self.alter_graph_mode_timeformat(start) + except KeyError: + pass + try: + end = edge_data.pop("end") + kw["end"] = str(end) + self.alter_graph_mode_timeformat(end) + except KeyError: + pass + source_id = str(G.nodes[u].get("id", u)) + target_id = 
str(G.nodes[v].get("id", v)) + edge_element = Element("edge", source=source_id, target=target_id, **kw) + default = G.graph.get("edge_default", {}) + if self.VERSION == "1.1": + edge_data = self.add_slices(edge_element, edge_data) + else: + edge_data = self.add_spells(edge_element, edge_data) + edge_data = self.add_viz(edge_element, edge_data) + edge_data = self.add_attributes("edge", edge_element, edge_data, default) + edges_element.append(edge_element) + graph_element.append(edges_element) + + def add_attributes(self, node_or_edge, xml_obj, data, default): + # Add attrvalues to node or edge + attvalues = Element("attvalues") + if len(data) == 0: + return data + mode = "static" + for k, v in data.items(): + # rename generic multigraph key to avoid any name conflict + if k == "key": + k = "networkx_key" + val_type = type(v) + if val_type not in self.xml_type: + raise TypeError(f"attribute value type is not allowed: {val_type}") + if isinstance(v, list): + # dynamic data + for val, start, end in v: + val_type = type(val) + if start is not None or end is not None: + mode = "dynamic" + self.alter_graph_mode_timeformat(start) + self.alter_graph_mode_timeformat(end) + break + attr_id = self.get_attr_id( + str(k), self.xml_type[val_type], node_or_edge, default, mode + ) + for val, start, end in v: + e = Element("attvalue") + e.attrib["for"] = attr_id + e.attrib["value"] = str(val) + # Handle nan, inf, -inf differently + if val_type == float: + if e.attrib["value"] == "inf": + e.attrib["value"] = "INF" + elif e.attrib["value"] == "nan": + e.attrib["value"] = "NaN" + elif e.attrib["value"] == "-inf": + e.attrib["value"] = "-INF" + if start is not None: + e.attrib["start"] = str(start) + if end is not None: + e.attrib["end"] = str(end) + attvalues.append(e) + else: + # static data + mode = "static" + attr_id = self.get_attr_id( + str(k), self.xml_type[val_type], node_or_edge, default, mode + ) + e = Element("attvalue") + e.attrib["for"] = attr_id + if isinstance(v, bool): + e.attrib["value"] = str(v).lower() + else: + e.attrib["value"] = str(v) + # Handle float nan, inf, -inf differently + if val_type == float: + if e.attrib["value"] == "inf": + e.attrib["value"] = "INF" + elif e.attrib["value"] == "nan": + e.attrib["value"] = "NaN" + elif e.attrib["value"] == "-inf": + e.attrib["value"] = "-INF" + attvalues.append(e) + xml_obj.append(attvalues) + return data + + def get_attr_id(self, title, attr_type, edge_or_node, default, mode): + # find the id of the attribute or generate a new id + try: + return self.attr[edge_or_node][mode][title] + except KeyError: + # generate new id + new_id = str(next(self.attr_id)) + self.attr[edge_or_node][mode][title] = new_id + attr_kwargs = {"id": new_id, "title": title, "type": attr_type} + attribute = Element("attribute", **attr_kwargs) + # add subelement for data default value if present + default_title = default.get(title) + if default_title is not None: + default_element = Element("default") + default_element.text = str(default_title) + attribute.append(default_element) + # new insert it into the XML + attributes_element = None + for a in self.graph_element.findall("attributes"): + # find existing attributes element by class and mode + a_class = a.get("class") + a_mode = a.get("mode", "static") + if a_class == edge_or_node and a_mode == mode: + attributes_element = a + if attributes_element is None: + # create new attributes element + attr_kwargs = {"mode": mode, "class": edge_or_node} + attributes_element = Element("attributes", **attr_kwargs) + 
self.graph_element.insert(0, attributes_element) + attributes_element.append(attribute) + return new_id + + def add_viz(self, element, node_data): + viz = node_data.pop("viz", False) + if viz: + color = viz.get("color") + if color is not None: + if self.VERSION == "1.1": + e = Element( + f"{{{self.NS_VIZ}}}color", + r=str(color.get("r")), + g=str(color.get("g")), + b=str(color.get("b")), + ) + else: + e = Element( + f"{{{self.NS_VIZ}}}color", + r=str(color.get("r")), + g=str(color.get("g")), + b=str(color.get("b")), + a=str(color.get("a", 1.0)), + ) + element.append(e) + + size = viz.get("size") + if size is not None: + e = Element(f"{{{self.NS_VIZ}}}size", value=str(size)) + element.append(e) + + thickness = viz.get("thickness") + if thickness is not None: + e = Element(f"{{{self.NS_VIZ}}}thickness", value=str(thickness)) + element.append(e) + + shape = viz.get("shape") + if shape is not None: + if shape.startswith("http"): + e = Element( + f"{{{self.NS_VIZ}}}shape", value="image", uri=str(shape) + ) + else: + e = Element(f"{{{self.NS_VIZ}}}shape", value=str(shape)) + element.append(e) + + position = viz.get("position") + if position is not None: + e = Element( + f"{{{self.NS_VIZ}}}position", + x=str(position.get("x")), + y=str(position.get("y")), + z=str(position.get("z")), + ) + element.append(e) + return node_data + + def add_parents(self, node_element, node_data): + parents = node_data.pop("parents", False) + if parents: + parents_element = Element("parents") + for p in parents: + e = Element("parent") + e.attrib["for"] = str(p) + parents_element.append(e) + node_element.append(parents_element) + return node_data + + def add_slices(self, node_or_edge_element, node_or_edge_data): + slices = node_or_edge_data.pop("slices", False) + if slices: + slices_element = Element("slices") + for start, end in slices: + e = Element("slice", start=str(start), end=str(end)) + slices_element.append(e) + node_or_edge_element.append(slices_element) + return node_or_edge_data + + def add_spells(self, node_or_edge_element, node_or_edge_data): + spells = node_or_edge_data.pop("spells", False) + if spells: + spells_element = Element("spells") + for start, end in spells: + e = Element("spell") + if start is not None: + e.attrib["start"] = str(start) + self.alter_graph_mode_timeformat(start) + if end is not None: + e.attrib["end"] = str(end) + self.alter_graph_mode_timeformat(end) + spells_element.append(e) + node_or_edge_element.append(spells_element) + return node_or_edge_data + + def alter_graph_mode_timeformat(self, start_or_end): + # If 'start' or 'end' appears, alter Graph mode to dynamic and + # set timeformat + if self.graph_element.get("mode") == "static": + if start_or_end is not None: + if isinstance(start_or_end, str): + timeformat = "date" + elif isinstance(start_or_end, float): + timeformat = "double" + elif isinstance(start_or_end, int): + timeformat = "long" + else: + raise nx.NetworkXError( + "timeformat should be of the type int, float or str" + ) + self.graph_element.set("timeformat", timeformat) + self.graph_element.set("mode", "dynamic") + + def write(self, fh): + # Serialize graph G in GEXF to the open fh + if self.prettyprint: + self.indent(self.xml) + document = ElementTree(self.xml) + document.write(fh, encoding=self.encoding, xml_declaration=True) + + def indent(self, elem, level=0): + # in-place prettyprint formatter + i = "\n" + " " * level + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + if not elem.tail or not elem.tail.strip(): + elem.tail = i 
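+ # note: the loop below deliberately rebinds `elem` to each child; when the + # loop finishes, `elem` is the last child, whose tail is set one indent + # shallower so the parent's closing tag lines up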
+ for elem in elem: + self.indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +class GEXFReader(GEXF): + # Class to read GEXF format files + # use read_gexf() function + def __init__(self, node_type=None, version="1.2draft"): + self.construct_types() + self.node_type = node_type + # assume simple graph and test for multigraph on read + self.simple_graph = True + self.set_version(version) + + def __call__(self, stream): + self.xml = ElementTree(file=stream) + g = self.xml.find(f"{{{self.NS_GEXF}}}graph") + if g is not None: + return self.make_graph(g) + # try all the versions + for version in self.versions: + self.set_version(version) + g = self.xml.find(f"{{{self.NS_GEXF}}}graph") + if g is not None: + return self.make_graph(g) + raise nx.NetworkXError("No <graph> element in GEXF file.") + + def make_graph(self, graph_xml): + # start with empty DiGraph or MultiDiGraph + edgedefault = graph_xml.get("defaultedgetype", None) + if edgedefault == "directed": + G = nx.MultiDiGraph() + else: + G = nx.MultiGraph() + + # graph attributes + graph_name = graph_xml.get("name", "") + if graph_name != "": + G.graph["name"] = graph_name + graph_start = graph_xml.get("start") + if graph_start is not None: + G.graph["start"] = graph_start + graph_end = graph_xml.get("end") + if graph_end is not None: + G.graph["end"] = graph_end + graph_mode = graph_xml.get("mode", "") + if graph_mode == "dynamic": + G.graph["mode"] = "dynamic" + else: + G.graph["mode"] = "static" + + # timeformat + self.timeformat = graph_xml.get("timeformat") + if self.timeformat == "date": + self.timeformat = "string" + + # node and edge attributes + attributes_elements = graph_xml.findall(f"{{{self.NS_GEXF}}}attributes") + # dictionaries to hold attributes and attribute defaults + node_attr = {} + node_default = {} + edge_attr = {} + edge_default = {} + for a in attributes_elements: + attr_class = a.get("class") + if attr_class == "node": + na, nd = self.find_gexf_attributes(a) + node_attr.update(na) + node_default.update(nd) + G.graph["node_default"] = node_default + elif attr_class == "edge": + ea, ed = self.find_gexf_attributes(a) + edge_attr.update(ea) + edge_default.update(ed) + G.graph["edge_default"] = edge_default + else: + raise nx.NetworkXError(f"Unknown attribute class {attr_class!r}.") + + # Hack to handle Gephi0.7beta bug + # add weight attribute + ea = {"weight": {"type": "double", "mode": "static", "title": "weight"}} + ed = {} + edge_attr.update(ea) + edge_default.update(ed) + G.graph["edge_default"] = edge_default + + # add nodes + nodes_element = graph_xml.find(f"{{{self.NS_GEXF}}}nodes") + if nodes_element is not None: + for node_xml in nodes_element.findall(f"{{{self.NS_GEXF}}}node"): + self.add_node(G, node_xml, node_attr) + + # add edges + edges_element = graph_xml.find(f"{{{self.NS_GEXF}}}edges") + if edges_element is not None: + for edge_xml in edges_element.findall(f"{{{self.NS_GEXF}}}edge"): + self.add_edge(G, edge_xml, edge_attr) + + # switch to Graph or DiGraph if no parallel edges were found.
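+ # (add_edge sets self.simple_graph = False as soon as it sees a duplicate + # (source, target) pair, so this conversion drops no edges)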
+ if self.simple_graph: + if G.is_directed(): + G = nx.DiGraph(G) + else: + G = nx.Graph(G) + return G + + def add_node(self, G, node_xml, node_attr, node_pid=None): + # add a single node with attributes to the graph + + # get attributes and subattributues for node + data = self.decode_attr_elements(node_attr, node_xml) + data = self.add_parents(data, node_xml) # add any parents + if self.VERSION == "1.1": + data = self.add_slices(data, node_xml) # add slices + else: + data = self.add_spells(data, node_xml) # add spells + data = self.add_viz(data, node_xml) # add viz + data = self.add_start_end(data, node_xml) # add start/end + + # find the node id and cast it to the appropriate type + node_id = node_xml.get("id") + if self.node_type is not None: + node_id = self.node_type(node_id) + + # every node should have a label + node_label = node_xml.get("label") + data["label"] = node_label + + # parent node id + node_pid = node_xml.get("pid", node_pid) + if node_pid is not None: + data["pid"] = node_pid + + # check for subnodes, recursive + subnodes = node_xml.find(f"{{{self.NS_GEXF}}}nodes") + if subnodes is not None: + for node_xml in subnodes.findall(f"{{{self.NS_GEXF}}}node"): + self.add_node(G, node_xml, node_attr, node_pid=node_id) + + G.add_node(node_id, **data) + + def add_start_end(self, data, xml): + # start and end times + ttype = self.timeformat + node_start = xml.get("start") + if node_start is not None: + data["start"] = self.python_type[ttype](node_start) + node_end = xml.get("end") + if node_end is not None: + data["end"] = self.python_type[ttype](node_end) + return data + + def add_viz(self, data, node_xml): + # add viz element for node + viz = {} + color = node_xml.find(f"{{{self.NS_VIZ}}}color") + if color is not None: + if self.VERSION == "1.1": + viz["color"] = { + "r": int(color.get("r")), + "g": int(color.get("g")), + "b": int(color.get("b")), + } + else: + viz["color"] = { + "r": int(color.get("r")), + "g": int(color.get("g")), + "b": int(color.get("b")), + "a": float(color.get("a", 1)), + } + + size = node_xml.find(f"{{{self.NS_VIZ}}}size") + if size is not None: + viz["size"] = float(size.get("value")) + + thickness = node_xml.find(f"{{{self.NS_VIZ}}}thickness") + if thickness is not None: + viz["thickness"] = float(thickness.get("value")) + + shape = node_xml.find(f"{{{self.NS_VIZ}}}shape") + if shape is not None: + viz["shape"] = shape.get("shape") + if viz["shape"] == "image": + viz["shape"] = shape.get("uri") + + position = node_xml.find(f"{{{self.NS_VIZ}}}position") + if position is not None: + viz["position"] = { + "x": float(position.get("x", 0)), + "y": float(position.get("y", 0)), + "z": float(position.get("z", 0)), + } + + if len(viz) > 0: + data["viz"] = viz + return data + + def add_parents(self, data, node_xml): + parents_element = node_xml.find(f"{{{self.NS_GEXF}}}parents") + if parents_element is not None: + data["parents"] = [] + for p in parents_element.findall(f"{{{self.NS_GEXF}}}parent"): + parent = p.get("for") + data["parents"].append(parent) + return data + + def add_slices(self, data, node_or_edge_xml): + slices_element = node_or_edge_xml.find(f"{{{self.NS_GEXF}}}slices") + if slices_element is not None: + data["slices"] = [] + for s in slices_element.findall(f"{{{self.NS_GEXF}}}slice"): + start = s.get("start") + end = s.get("end") + data["slices"].append((start, end)) + return data + + def add_spells(self, data, node_or_edge_xml): + spells_element = node_or_edge_xml.find(f"{{{self.NS_GEXF}}}spells") + if spells_element is not None: + 
data["spells"] = [] + ttype = self.timeformat + for s in spells_element.findall(f"{{{self.NS_GEXF}}}spell"): + start = self.python_type[ttype](s.get("start")) + end = self.python_type[ttype](s.get("end")) + data["spells"].append((start, end)) + return data + + def add_edge(self, G, edge_element, edge_attr): + # add an edge to the graph + + # raise error if we find mixed directed and undirected edges + edge_direction = edge_element.get("type") + if G.is_directed() and edge_direction == "undirected": + raise nx.NetworkXError("Undirected edge found in directed graph.") + if (not G.is_directed()) and edge_direction == "directed": + raise nx.NetworkXError("Directed edge found in undirected graph.") + + # Get source and target and recast type if required + source = edge_element.get("source") + target = edge_element.get("target") + if self.node_type is not None: + source = self.node_type(source) + target = self.node_type(target) + + data = self.decode_attr_elements(edge_attr, edge_element) + data = self.add_start_end(data, edge_element) + + if self.VERSION == "1.1": + data = self.add_slices(data, edge_element) # add slices + else: + data = self.add_spells(data, edge_element) # add spells + + # GEXF stores edge ids as an attribute + # NetworkX uses them as keys in multigraphs + # if networkx_key is not specified as an attribute + edge_id = edge_element.get("id") + if edge_id is not None: + data["id"] = edge_id + + # check if there is a 'multigraph_key' and use that as edge_id + multigraph_key = data.pop("networkx_key", None) + if multigraph_key is not None: + edge_id = multigraph_key + + weight = edge_element.get("weight") + if weight is not None: + data["weight"] = float(weight) + + edge_label = edge_element.get("label") + if edge_label is not None: + data["label"] = edge_label + + if G.has_edge(source, target): + # seen this edge before - this is a multigraph + self.simple_graph = False + G.add_edge(source, target, key=edge_id, **data) + if edge_direction == "mutual": + G.add_edge(target, source, key=edge_id, **data) + + def decode_attr_elements(self, gexf_keys, obj_xml): + # Use the key information to decode the attr XML + attr = {} + # look for outer '' element + attr_element = obj_xml.find(f"{{{self.NS_GEXF}}}attvalues") + if attr_element is not None: + # loop over elements + for a in attr_element.findall(f"{{{self.NS_GEXF}}}attvalue"): + key = a.get("for") # for is required + try: # should be in our gexf_keys dictionary + title = gexf_keys[key]["title"] + except KeyError as err: + raise nx.NetworkXError(f"No attribute defined for={key}.") from err + atype = gexf_keys[key]["type"] + value = a.get("value") + if atype == "boolean": + value = self.convert_bool[value] + else: + value = self.python_type[atype](value) + if gexf_keys[key]["mode"] == "dynamic": + # for dynamic graphs use list of three-tuples + # [(value1,start1,end1), (value2,start2,end2), etc] + ttype = self.timeformat + start = self.python_type[ttype](a.get("start")) + end = self.python_type[ttype](a.get("end")) + if title in attr: + attr[title].append((value, start, end)) + else: + attr[title] = [(value, start, end)] + else: + # for static graphs just assign the value + attr[title] = value + return attr + + def find_gexf_attributes(self, attributes_element): + # Extract all the attributes and defaults + attrs = {} + defaults = {} + mode = attributes_element.get("mode") + for k in attributes_element.findall(f"{{{self.NS_GEXF}}}attribute"): + attr_id = k.get("id") + title = k.get("title") + atype = k.get("type") + attrs[attr_id] = 
{"title": title, "type": atype, "mode": mode} + # check for the 'default' subelement of key element and add + default = k.find(f"{{{self.NS_GEXF}}}default") + if default is not None: + if atype == "boolean": + value = self.convert_bool[default.text] + else: + value = self.python_type[atype](default.text) + defaults[title] = value + return attrs, defaults + + +def relabel_gexf_graph(G): + """Relabel graph using "label" node keyword for node label. + + Parameters + ---------- + G : graph + A NetworkX graph read from GEXF data + + Returns + ------- + H : graph + A NetworkX graph with relabeled nodes + + Raises + ------ + NetworkXError + If node labels are missing or not unique while relabel=True. + + Notes + ----- + This function relabels the nodes in a NetworkX graph with the + "label" attribute. It also handles relabeling the specific GEXF + node attributes "parents", and "pid". + """ + # build mapping of node labels, do some error checking + try: + mapping = [(u, G.nodes[u]["label"]) for u in G] + except KeyError as err: + raise nx.NetworkXError( + "Failed to relabel nodes: missing node labels found. Use relabel=False." + ) from err + x, y = zip(*mapping) + if len(set(y)) != len(G): + raise nx.NetworkXError( + "Failed to relabel nodes: " + "duplicate node labels found. " + "Use relabel=False." + ) + mapping = dict(mapping) + H = nx.relabel_nodes(G, mapping) + # relabel attributes + for n in G: + m = mapping[n] + H.nodes[m]["id"] = n + H.nodes[m].pop("label") + if "pid" in H.nodes[m]: + H.nodes[m]["pid"] = mapping[G.nodes[n]["pid"]] + if "parents" in H.nodes[m]: + H.nodes[m]["parents"] = [mapping[p] for p in G.nodes[n]["parents"]] + return H diff --git a/phivenv/Lib/site-packages/networkx/readwrite/gml.py b/phivenv/Lib/site-packages/networkx/readwrite/gml.py new file mode 100644 index 0000000000000000000000000000000000000000..3804989c72327ffb3787eb0bbb9ef5f742159a6a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/gml.py @@ -0,0 +1,878 @@ +""" +Read graphs in GML format. + +"GML, the Graph Modelling Language, is our proposal for a portable +file format for graphs. GML's key features are portability, simple +syntax, extensibility and flexibility. A GML file consists of a +hierarchical key-value lists. Graphs can be annotated with arbitrary +data structures. The idea for a common file format was born at the +GD'95; this proposal is the outcome of many discussions. GML is the +standard file format in the Graphlet graph editor system. It has been +overtaken and adapted by several other systems for drawing graphs." + +GML files are stored using a 7-bit ASCII encoding with any extended +ASCII characters (iso8859-1) appearing as HTML character entities. +You will need to give some thought into how the exported data should +interact with different languages and even different Python versions. +Re-importing from gml is also a concern. + +Without specifying a `stringizer`/`destringizer`, the code is capable of +writing `int`/`float`/`str`/`dict`/`list` data as required by the GML +specification. For writing other data types, and for reading data other +than `str` you need to explicitly supply a `stringizer`/`destringizer`. + +For additional documentation on the GML file format, please see the +`GML website `_. + +Several example graphs in GML format may be found on Mark Newman's +`Network data page `_. 
+""" +import html.entities as htmlentitydefs +import re +import warnings +from ast import literal_eval +from collections import defaultdict +from enum import Enum +from io import StringIO +from typing import Any, NamedTuple + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import open_file + +__all__ = ["read_gml", "parse_gml", "generate_gml", "write_gml"] + + +def escape(text): + """Use XML character references to escape characters. + + Use XML character references for unprintable or non-ASCII + characters, double quotes and ampersands in a string + """ + + def fixup(m): + ch = m.group(0) + return "&#" + str(ord(ch)) + ";" + + text = re.sub('[^ -~]|[&"]', fixup, text) + return text if isinstance(text, str) else str(text) + + +def unescape(text): + """Replace XML character references with the referenced characters""" + + def fixup(m): + text = m.group(0) + if text[1] == "#": + # Character reference + if text[2] == "x": + code = int(text[3:-1], 16) + else: + code = int(text[2:-1]) + else: + # Named entity + try: + code = htmlentitydefs.name2codepoint[text[1:-1]] + except KeyError: + return text # leave unchanged + try: + return chr(code) + except (ValueError, OverflowError): + return text # leave unchanged + + return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text) + + +def literal_destringizer(rep): + """Convert a Python literal to the value it represents. + + Parameters + ---------- + rep : string + A Python literal. + + Returns + ------- + value : object + The value of the Python literal. + + Raises + ------ + ValueError + If `rep` is not a Python literal. + """ + if isinstance(rep, str): + orig_rep = rep + try: + return literal_eval(rep) + except SyntaxError as err: + raise ValueError(f"{orig_rep!r} is not a valid Python literal") from err + else: + raise ValueError(f"{rep!r} is not a string") + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_gml(path, label="label", destringizer=None): + """Read graph in GML format from `path`. + + Parameters + ---------- + path : filename or filehandle + The filename or filehandle to read from. + + label : string, optional + If not None, the parsed nodes will be renamed according to node + attributes indicated by `label`. Default value: 'label'. + + destringizer : callable, optional + A `destringizer` that recovers values stored as strings in GML. If it + cannot convert a string to a value, a `ValueError` is raised. Default + value : None. + + Returns + ------- + G : NetworkX graph + The parsed graph. + + Raises + ------ + NetworkXError + If the input cannot be parsed. + + See Also + -------- + write_gml, parse_gml + literal_destringizer + + Notes + ----- + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. 
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_gml(G, "test.gml") + + GML values are interpreted as strings by default: + + >>> H = nx.read_gml("test.gml") + >>> H.nodes + NodeView(('0', '1', '2', '3')) + + When a `destringizer` is provided, GML values are converted to the provided type. + For example, integer nodes can be recovered as shown below: + + >>> J = nx.read_gml("test.gml", destringizer=int) + >>> J.nodes + NodeView((0, 1, 2, 3)) + + """ + + def filter_lines(lines): + for line in lines: + try: + line = line.decode("ascii") + except UnicodeDecodeError as err: + raise NetworkXError("input is not ASCII-encoded") from err + if not isinstance(line, str): + lines = str(lines) + if line and line[-1] == "\n": + line = line[:-1] + yield line + + G = parse_gml_lines(filter_lines(path), label, destringizer) + return G + + +@nx._dispatch(graphs=None) +def parse_gml(lines, label="label", destringizer=None): + """Parse GML graph from a string or iterable. + + Parameters + ---------- + lines : string or iterable of strings + Data in GML format. + + label : string, optional + If not None, the parsed nodes will be renamed according to node + attributes indicated by `label`. Default value: 'label'. + + destringizer : callable, optional + A `destringizer` that recovers values stored as strings in GML. If it + cannot convert a string to a value, a `ValueError` is raised. Default + value : None. + + Returns + ------- + G : NetworkX graph + The parsed graph. + + Raises + ------ + NetworkXError + If the input cannot be parsed. + + See Also + -------- + write_gml, read_gml + + Notes + ----- + This stores nested GML attributes as dictionaries in the NetworkX graph, + node, and edge attribute structures. + + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. 
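+ + Examples + -------- + A small graph described inline: + + >>> G = nx.parse_gml('graph [ node [ id 0 label "a" ] node [ id 1 label "b" ] edge [ source 0 target 1 ] ]') + >>> sorted(G.nodes) + ['a', 'b'] + >>> list(G.edges) + [('a', 'b')]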
+ """ + + def decode_line(line): + if isinstance(line, bytes): + try: + line.decode("ascii") + except UnicodeDecodeError as err: + raise NetworkXError("input is not ASCII-encoded") from err + if not isinstance(line, str): + line = str(line) + return line + + def filter_lines(lines): + if isinstance(lines, str): + lines = decode_line(lines) + lines = lines.splitlines() + yield from lines + else: + for line in lines: + line = decode_line(line) + if line and line[-1] == "\n": + line = line[:-1] + if line.find("\n") != -1: + raise NetworkXError("input line contains newline") + yield line + + G = parse_gml_lines(filter_lines(lines), label, destringizer) + return G + + +class Pattern(Enum): + """encodes the index of each token-matching pattern in `tokenize`.""" + + KEYS = 0 + REALS = 1 + INTS = 2 + STRINGS = 3 + DICT_START = 4 + DICT_END = 5 + COMMENT_WHITESPACE = 6 + + +class Token(NamedTuple): + category: Pattern + value: Any + line: int + position: int + + +LIST_START_VALUE = "_networkx_list_start" + + +def parse_gml_lines(lines, label, destringizer): + """Parse GML `lines` into a graph.""" + + def tokenize(): + patterns = [ + r"[A-Za-z][0-9A-Za-z_]*\b", # keys + # reals + r"[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*|INF)(?:[Ee][+-]?[0-9]+)?", + r"[+-]?[0-9]+", # ints + r'".*?"', # strings + r"\[", # dict start + r"\]", # dict end + r"#.*$|\s+", # comments and whitespaces + ] + tokens = re.compile("|".join(f"({pattern})" for pattern in patterns)) + lineno = 0 + multilines = [] # entries spread across multiple lines + for line in lines: + pos = 0 + + # deal with entries spread across multiple lines + # + # should we actually have to deal with escaped "s then do it here + if multilines: + multilines.append(line.strip()) + if line[-1] == '"': # closing multiline entry + # multiline entries will be joined by space. cannot + # reintroduce newlines as this will break the tokenizer + line = " ".join(multilines) + multilines = [] + else: # continued multiline entry + lineno += 1 + continue + else: + if line.count('"') == 1: # opening multiline entry + if line.strip()[0] != '"' and line.strip()[-1] != '"': + # since we expect something like key "value", the " should not be found at ends + # otherwise tokenizer will pick up the formatting mistake. 
+ multilines = [line.rstrip()] + lineno += 1 + continue + + length = len(line) + + while pos < length: + match = tokens.match(line, pos) + if match is None: + m = f"cannot tokenize {line[pos:]} at ({lineno + 1}, {pos + 1})" + raise NetworkXError(m) + for i in range(len(patterns)): + group = match.group(i + 1) + if group is not None: + if i == 0: # keys + value = group.rstrip() + elif i == 1: # reals + value = float(group) + elif i == 2: # ints + value = int(group) + else: + value = group + if i != 6: # comments and whitespaces + yield Token(Pattern(i), value, lineno + 1, pos + 1) + pos += len(group) + break + lineno += 1 + yield Token(None, None, lineno + 1, 1) # EOF + + def unexpected(curr_token, expected): + category, value, lineno, pos = curr_token + value = repr(value) if value is not None else "EOF" + raise NetworkXError(f"expected {expected}, found {value} at ({lineno}, {pos})") + + def consume(curr_token, category, expected): + if curr_token.category == category: + return next(tokens) + unexpected(curr_token, expected) + + def parse_kv(curr_token): + dct = defaultdict(list) + while curr_token.category == Pattern.KEYS: + key = curr_token.value + curr_token = next(tokens) + category = curr_token.category + if category == Pattern.REALS or category == Pattern.INTS: + value = curr_token.value + curr_token = next(tokens) + elif category == Pattern.STRINGS: + value = unescape(curr_token.value[1:-1]) + if destringizer: + try: + value = destringizer(value) + except ValueError: + pass + # Special handling for empty lists and tuples + if value == "()": + value = () + if value == "[]": + value = [] + curr_token = next(tokens) + elif category == Pattern.DICT_START: + curr_token, value = parse_dict(curr_token) + else: + # Allow for string convertible id and label values + if key in ("id", "label", "source", "target"): + try: + # String convert the token value + value = unescape(str(curr_token.value)) + if destringizer: + try: + value = destringizer(value) + except ValueError: + pass + curr_token = next(tokens) + except Exception: + msg = ( + "an int, float, string, '[' or string" + + " convertible ASCII value for node id or label" + ) + unexpected(curr_token, msg) + # Special handling for nan and infinity. Since the gml language + # defines unquoted strings as keys, the numeric and string branches + # are skipped and we end up in this special branch, so we need to + # convert the current token value to a float for NAN and plain INF. + # +/-INF are handled in the pattern for 'reals' in tokenize(). This + # allows labels and values to be nan or infinity, but not keys. 
+ elif curr_token.value in {"NAN", "INF"}: + value = float(curr_token.value) + curr_token = next(tokens) + else: # Otherwise error out + unexpected(curr_token, "an int, float, string or '['") + dct[key].append(value) + + def clean_dict_value(value): + if not isinstance(value, list): + return value + if len(value) == 1: + return value[0] + if value[0] == LIST_START_VALUE: + return value[1:] + return value + + dct = {key: clean_dict_value(value) for key, value in dct.items()} + return curr_token, dct + + def parse_dict(curr_token): + # dict start + curr_token = consume(curr_token, Pattern.DICT_START, "'['") + # dict contents + curr_token, dct = parse_kv(curr_token) + # dict end + curr_token = consume(curr_token, Pattern.DICT_END, "']'") + return curr_token, dct + + def parse_graph(): + curr_token, dct = parse_kv(next(tokens)) + if curr_token.category is not None: # EOF + unexpected(curr_token, "EOF") + if "graph" not in dct: + raise NetworkXError("input contains no graph") + graph = dct["graph"] + if isinstance(graph, list): + raise NetworkXError("input contains more than one graph") + return graph + + tokens = tokenize() + graph = parse_graph() + + directed = graph.pop("directed", False) + multigraph = graph.pop("multigraph", False) + if not multigraph: + G = nx.DiGraph() if directed else nx.Graph() + else: + G = nx.MultiDiGraph() if directed else nx.MultiGraph() + graph_attr = {k: v for k, v in graph.items() if k not in ("node", "edge")} + G.graph.update(graph_attr) + + def pop_attr(dct, category, attr, i): + try: + return dct.pop(attr) + except KeyError as err: + raise NetworkXError(f"{category} #{i} has no {attr!r} attribute") from err + + nodes = graph.get("node", []) + mapping = {} + node_labels = set() + for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]): + id = pop_attr(node, "node", "id", i) + if id in G: + raise NetworkXError(f"node id {id!r} is duplicated") + if label is not None and label != "id": + node_label = pop_attr(node, "node", label, i) + if node_label in node_labels: + raise NetworkXError(f"node label {node_label!r} is duplicated") + node_labels.add(node_label) + mapping[id] = node_label + G.add_node(id, **node) + + edges = graph.get("edge", []) + for i, edge in enumerate(edges if isinstance(edges, list) else [edges]): + source = pop_attr(edge, "edge", "source", i) + target = pop_attr(edge, "edge", "target", i) + if source not in G: + raise NetworkXError(f"edge #{i} has undefined source {source!r}") + if target not in G: + raise NetworkXError(f"edge #{i} has undefined target {target!r}") + if not multigraph: + if not G.has_edge(source, target): + G.add_edge(source, target, **edge) + else: + arrow = "->" if directed else "--" + msg = f"edge #{i} ({source!r}{arrow}{target!r}) is duplicated" + raise nx.NetworkXError(msg) + else: + key = edge.pop("key", None) + if key is not None and G.has_edge(source, target, key): + arrow = "->" if directed else "--" + msg = f"edge #{i} ({source!r}{arrow}{target!r}, {key!r})" + msg2 = 'Hint: If multigraph add "multigraph 1" to file header.' + raise nx.NetworkXError(msg + " is duplicated\n" + msg2) + G.add_edge(source, target, key, **edge) + + if label is not None and label != "id": + G = nx.relabel_nodes(G, mapping) + return G + + +def literal_stringizer(value): + """Convert a `value` to a Python literal in GML representation. + + Parameters + ---------- + value : object + The `value` to be converted to GML representation. + + Returns + ------- + rep : string + A double-quoted Python literal representing value. 
Unprintable + characters are replaced by XML character references. + + Raises + ------ + ValueError + If `value` cannot be converted to GML. + + Notes + ----- + The original value can be recovered using the + :func:`networkx.readwrite.gml.literal_destringizer` function. + """ + + def stringize(value): + if isinstance(value, (int, bool)) or value is None: + if value is True: # GML uses 1/0 for boolean values. + buf.write(str(1)) + elif value is False: + buf.write(str(0)) + else: + buf.write(str(value)) + elif isinstance(value, str): + text = repr(value) + if text[0] != "u": + try: + value.encode("latin1") + except UnicodeEncodeError: + text = "u" + text + buf.write(text) + elif isinstance(value, (float, complex, str, bytes)): + buf.write(repr(value)) + elif isinstance(value, list): + buf.write("[") + first = True + for item in value: + if not first: + buf.write(",") + else: + first = False + stringize(item) + buf.write("]") + elif isinstance(value, tuple): + if len(value) > 1: + buf.write("(") + first = True + for item in value: + if not first: + buf.write(",") + else: + first = False + stringize(item) + buf.write(")") + elif value: + buf.write("(") + stringize(value[0]) + buf.write(",)") + else: + buf.write("()") + elif isinstance(value, dict): + buf.write("{") + first = True + for key, value in value.items(): + if not first: + buf.write(",") + else: + first = False + stringize(key) + buf.write(":") + stringize(value) + buf.write("}") + elif isinstance(value, set): + buf.write("{") + first = True + for item in value: + if not first: + buf.write(",") + else: + first = False + stringize(item) + buf.write("}") + else: + msg = f"{value!r} cannot be converted into a Python literal" + raise ValueError(msg) + + buf = StringIO() + stringize(value) + return buf.getvalue() + + +def generate_gml(G, stringizer=None): + r"""Generate a single entry of the graph `G` in GML format. + + Parameters + ---------- + G : NetworkX graph + The graph to be converted to GML. + + stringizer : callable, optional + A `stringizer` which converts non-int/non-float/non-dict values into + strings. If it cannot convert a value into a string, it should raise a + `ValueError` to indicate that. Default value: None. + + Returns + ------- + lines: generator of strings + Lines of GML data. Newlines are not appended. + + Raises + ------ + NetworkXError + If `stringizer` cannot convert a value into a string, or the value to + convert is not a string while `stringizer` is None. + + See Also + -------- + literal_stringizer + + Notes + ----- + Graph attributes named 'directed', 'multigraph', 'node' or + 'edge', node attributes named 'id' or 'label', edge attributes + named 'source' or 'target' (or 'key' if `G` is a multigraph) + are ignored because these attribute names are used to encode the graph + structure. + + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. 
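+ + List- and tuple-valued attributes (other than labels) are written as + repeated keys, one entry per item. A single-element list is disambiguated + by first emitting the sentinel value ``"_networkx_list_start"`` under the + same key; the GML reader in this module strips the sentinel again when it + rebuilds the list.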
+ + Examples + -------- + >>> G = nx.Graph() + >>> G.add_node("1") + >>> print("\n".join(nx.generate_gml(G))) + graph [ + node [ + id 0 + label "1" + ] + ] + >>> G = nx.MultiGraph([("a", "b"), ("a", "b")]) + >>> print("\n".join(nx.generate_gml(G))) + graph [ + multigraph 1 + node [ + id 0 + label "a" + ] + node [ + id 1 + label "b" + ] + edge [ + source 0 + target 1 + key 0 + ] + edge [ + source 0 + target 1 + key 1 + ] + ] + """ + valid_keys = re.compile("^[A-Za-z][0-9A-Za-z_]*$") + + def stringize(key, value, ignored_keys, indent, in_list=False): + if not isinstance(key, str): + raise NetworkXError(f"{key!r} is not a string") + if not valid_keys.match(key): + raise NetworkXError(f"{key!r} is not a valid key") + if not isinstance(key, str): + key = str(key) + if key not in ignored_keys: + if isinstance(value, (int, bool)): + if key == "label": + yield indent + key + ' "' + str(value) + '"' + elif value is True: + # python bool is an instance of int + yield indent + key + " 1" + elif value is False: + yield indent + key + " 0" + # GML only supports signed 32-bit integers + elif value < -(2**31) or value >= 2**31: + yield indent + key + ' "' + str(value) + '"' + else: + yield indent + key + " " + str(value) + elif isinstance(value, float): + text = repr(value).upper() + # GML matches INF to keys, so prepend + to INF. Use repr(float(*)) + # instead of string literal to future proof against changes to repr. + if text == repr(float("inf")).upper(): + text = "+" + text + else: + # GML requires that a real literal contain a decimal point, but + # repr may not output a decimal point when the mantissa is + # integral and hence needs fixing. + epos = text.rfind("E") + if epos != -1 and text.find(".", 0, epos) == -1: + text = text[:epos] + "." + text[epos:] + if key == "label": + yield indent + key + ' "' + text + '"' + else: + yield indent + key + " " + text + elif isinstance(value, dict): + yield indent + key + " [" + next_indent = indent + " " + for key, value in value.items(): + yield from stringize(key, value, (), next_indent) + yield indent + "]" + elif isinstance(value, tuple) and key == "label": + yield indent + key + f" \"({','.join(repr(v) for v in value)})\"" + elif isinstance(value, (list, tuple)) and key != "label" and not in_list: + if len(value) == 0: + yield indent + key + " " + f'"{value!r}"' + if len(value) == 1: + yield indent + key + " " + f'"{LIST_START_VALUE}"' + for val in value: + yield from stringize(key, val, (), indent, True) + else: + if stringizer: + try: + value = stringizer(value) + except ValueError as err: + raise NetworkXError( + f"{value!r} cannot be converted into a string" + ) from err + if not isinstance(value, str): + raise NetworkXError(f"{value!r} is not a string") + yield indent + key + ' "' + escape(value) + '"' + + multigraph = G.is_multigraph() + yield "graph [" + + # Output graph attributes + if G.is_directed(): + yield " directed 1" + if multigraph: + yield " multigraph 1" + ignored_keys = {"directed", "multigraph", "node", "edge"} + for attr, value in G.graph.items(): + yield from stringize(attr, value, ignored_keys, " ") + + # Output node data + node_id = dict(zip(G, range(len(G)))) + ignored_keys = {"id", "label"} + for node, attrs in G.nodes.items(): + yield " node [" + yield " id " + str(node_id[node]) + yield from stringize("label", node, (), " ") + for attr, value in attrs.items(): + yield from stringize(attr, value, ignored_keys, " ") + yield " ]" + + # Output edge data + ignored_keys = {"source", "target"} + kwargs = {"data": True} + if 
multigraph: + ignored_keys.add("key") + kwargs["keys"] = True + for e in G.edges(**kwargs): + yield " edge [" + yield " source " + str(node_id[e[0]]) + yield " target " + str(node_id[e[1]]) + if multigraph: + yield from stringize("key", e[2], (), " ") + for attr, value in e[-1].items(): + yield from stringize(attr, value, ignored_keys, " ") + yield " ]" + yield "]" + + +@open_file(1, mode="wb") +def write_gml(G, path, stringizer=None): + """Write a graph `G` in GML format to the file or file handle `path`. + + Parameters + ---------- + G : NetworkX graph + The graph to be converted to GML. + + path : filename or filehandle + The filename or filehandle to write. Files whose names end with .gz or + .bz2 will be compressed. + + stringizer : callable, optional + A `stringizer` which converts non-int/non-float/non-dict values into + strings. If it cannot convert a value into a string, it should raise a + `ValueError` to indicate that. Default value: None. + + Raises + ------ + NetworkXError + If `stringizer` cannot convert a value into a string, or the value to + convert is not a string while `stringizer` is None. + + See Also + -------- + read_gml, generate_gml + literal_stringizer + + Notes + ----- + Graph attributes named 'directed', 'multigraph', 'node' or + 'edge', node attributes named 'id' or 'label', edge attributes + named 'source' or 'target' (or 'key' if `G` is a multigraph) + are ignored because these attribute names are used to encode the graph + structure. + + GML files are stored using a 7-bit ASCII encoding with any extended + ASCII characters (iso8859-1) appearing as HTML character entities. + Without specifying a `stringizer`/`destringizer`, the code is capable of + writing `int`/`float`/`str`/`dict`/`list` data as required by the GML + specification. For writing other data types, and for reading data other + than `str` you need to explicitly supply a `stringizer`/`destringizer`. + + Note that while we allow non-standard GML to be read from a file, we make + sure to write GML format. In particular, underscores are not allowed in + attribute names. + For additional documentation on the GML file format, please see the + `GML url `_. + + See the module docstring :mod:`networkx.readwrite.gml` for more details. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_gml(G, "test.gml") + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_gml(G, "test.gml.gz") + """ + for line in generate_gml(G, stringizer): + path.write((line + "\n").encode("ascii")) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/graph6.py b/phivenv/Lib/site-packages/networkx/readwrite/graph6.py new file mode 100644 index 0000000000000000000000000000000000000000..3342da2004a88496e6efe8abc098ee66ca47b10c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/graph6.py @@ -0,0 +1,416 @@ +# Original author: D. Eppstein, UC Irvine, August 12, 2003. +# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain. +"""Functions for reading and writing graphs in the *graph6* format. + +The *graph6* file format is suitable for small graphs or large dense +graphs. For large sparse graphs, use the *sparse6* format. + +For more information, see the `graph6`_ homepage. + +.. 
_graph6: http://users.cecs.anu.edu.au/~bdm/data/formats.html + +""" +from itertools import islice + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for, open_file + +__all__ = ["from_graph6_bytes", "read_graph6", "to_graph6_bytes", "write_graph6"] + + +def _generate_graph6_bytes(G, nodes, header): + """Yield bytes in the graph6 encoding of a graph. + + `G` is an undirected simple graph. `nodes` is the list of nodes for + which the node-induced subgraph will be encoded; if `nodes` is the + list of all nodes in the graph, the entire graph will be + encoded. `header` is a Boolean that specifies whether to generate + the header ``b'>>graph6<<'`` before the remaining data. + + This function generates `bytes` objects in the following order: + + 1. the header (if requested), + 2. the encoding of the number of nodes, + 3. each character, one-at-a-time, in the encoding of the requested + node-induced subgraph, + 4. a newline character. + + This function raises :exc:`ValueError` if the graph is too large for + the graph6 format (that is, greater than ``2 ** 36`` nodes). + + """ + n = len(G) + if n >= 2**36: + raise ValueError( + "graph6 is only defined if number of nodes is less " "than 2 ** 36" + ) + if header: + yield b">>graph6<<" + for d in n_to_data(n): + yield str.encode(chr(d + 63)) + # This generates the same as `(v in G[u] for u, v in combinations(G, 2))`, + # but in "column-major" order instead of "row-major" order. + bits = (nodes[j] in G[nodes[i]] for j in range(1, n) for i in range(j)) + chunk = list(islice(bits, 6)) + while chunk: + d = sum(b << 5 - i for i, b in enumerate(chunk)) + yield str.encode(chr(d + 63)) + chunk = list(islice(bits, 6)) + yield b"\n" + + +@nx._dispatch(graphs=None) +def from_graph6_bytes(bytes_in): + """Read a simple undirected graph in graph6 format from bytes. + + Parameters + ---------- + bytes_in : bytes + Data in graph6 format, without a trailing newline. + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If bytes_in is unable to be parsed in graph6 format + + ValueError + If any character ``c`` in bytes_in does not satisfy + ``63 <= ord(c) < 127``. + + Examples + -------- + >>> G = nx.from_graph6_bytes(b"A_") + >>> sorted(G.edges()) + [(0, 1)] + + See Also + -------- + read_graph6, write_graph6 + + References + ---------- + .. [1] Graph6 specification + + + """ + + def bits(): + """Returns sequence of individual bits from 6-bit-per-value + list of data values.""" + for d in data: + for i in [5, 4, 3, 2, 1, 0]: + yield (d >> i) & 1 + + if bytes_in.startswith(b">>graph6<<"): + bytes_in = bytes_in[10:] + + data = [c - 63 for c in bytes_in] + if any(c > 63 for c in data): + raise ValueError("each input character must be in range(63, 127)") + + n, data = data_to_n(data) + nd = (n * (n - 1) // 2 + 5) // 6 + if len(data) != nd: + raise NetworkXError( + f"Expected {n * (n - 1) // 2} bits but got {len(data) * 6} in graph6" + ) + + G = nx.Graph() + G.add_nodes_from(range(n)) + for (i, j), b in zip(((i, j) for j in range(1, n) for i in range(j)), bits()): + if b: + G.add_edge(i, j) + + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def to_graph6_bytes(G, nodes=None, header=True): + """Convert a simple undirected graph to bytes in graph6 format. + + Parameters + ---------- + G : Graph (undirected) + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by ``G.nodes()`` is used. 
+ + header: bool + If True add '>>graph6<<' bytes to head of data. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + ValueError + If the graph has at least ``2 ** 36`` nodes; the graph6 format + is only defined for graphs of order less than ``2 ** 36``. + + Examples + -------- + >>> nx.to_graph6_bytes(nx.path_graph(2)) + b'>>graph6<<A_\n' + + """ + if nodes is not None: + G = G.subgraph(nodes) + H = nx.convert_node_labels_to_integers(G) + nodes = sorted(H.nodes()) + return b"".join(_generate_graph6_bytes(H, nodes, header)) + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_graph6(path): + """Read simple undirected graphs in graph6 format from path. + + Parameters + ---------- + path : file or string + File or filename to read. + + Returns + ------- + G : Graph or list of Graphs + If the file contains multiple lines then a list of graphs is returned + + Raises + ------ + NetworkXError + If the string is unable to be parsed in graph6 format + + Examples + -------- + You can read a graph6 file by giving the path to the file:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile(delete=False) as f: + ... _ = f.write(b">>graph6<<A_\n") + ... _ = f.seek(0) + ... G = nx.read_graph6(f.name) + >>> list(G.edges()) + [(0, 1)] + + You can also read a graph6 file by giving an open file-like object:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile() as f: + ... _ = f.write(b">>graph6<<A_\n") + ... _ = f.seek(0) + ... G = nx.read_graph6(f) + >>> list(G.edges()) + [(0, 1)] + + See Also + -------- + from_graph6_bytes, write_graph6 + + References + ---------- + .. [1] Graph6 specification, + http://users.cecs.anu.edu.au/~bdm/data/formats.html + + """ + glist = [] + for line in path: + line = line.strip() + if not len(line): + continue + glist.append(from_graph6_bytes(line)) + if len(glist) == 1: + return glist[0] + else: + return glist + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@open_file(1, mode="wb") +def write_graph6(G, path, nodes=None, header=True): + """Write a simple undirected graph to a path in graph6 format. + + Parameters + ---------- + G : Graph (undirected) + + path : str + The path naming the file to which to write the graph. + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by ``G.nodes()`` is used. + + header: bool + If True add '>>graph6<<' string to head of data + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + ValueError + If the graph has at least ``2 ** 36`` nodes; the graph6 format + is only defined for graphs of order less than ``2 ** 36``. + + Examples + -------- + You can write a graph6 file by giving the path to a file:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile(delete=False) as f: + ... nx.write_graph6(nx.path_graph(2), f.name) + ... _ = f.seek(0) + ... print(f.read()) + b'>>graph6<<A_\n' + + """ + return write_graph6_file(G, path, nodes=nodes, header=header) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def write_graph6_file(G, f, nodes=None, header=True): + """Write a simple undirected graph to a file-like object in graph6 format. + + Parameters + ---------- + G : Graph (undirected) + + f : file-like object + The file to write. + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by ``G.nodes()`` is used. + + header: bool + If True add '>>graph6<<' string to head of data + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph.
+
+    ValueError
+        If the graph has at least ``2 ** 36`` nodes; the graph6 format
+        is only defined for graphs of order less than ``2 ** 36``.
+
+    Examples
+    --------
+    You can write a graph6 file by giving an open file-like object::
+
+        >>> import tempfile
+        >>> with tempfile.NamedTemporaryFile() as f:
+        ...     nx.write_graph6(nx.path_graph(2), f)
+        ...     _ = f.seek(0)
+        ...     print(f.read())
+        b'>>graph6<<A_\n'
+
+    See Also
+    --------
+    from_graph6_bytes, read_graph6
+
+    References
+    ----------
+    .. [1] Graph6 specification
+           <http://users.cecs.anu.edu.au/~bdm/data/formats.html>
+
+    """
+    if nodes is not None:
+        G = G.subgraph(nodes)
+    H = nx.convert_node_labels_to_integers(G)
+    nodes = sorted(H.nodes())
+    for b in _generate_graph6_bytes(H, nodes, header):
+        f.write(b)
+
+
+def data_to_n(data):
+    """Read initial one-, four- or eight-unit value from graph6
+    integer sequence.
+
+    Return (value, rest of seq.)"""
+    if data[0] <= 62:
+        return data[0], data[1:]
+    if data[1] <= 62:
+        return (data[1] << 12) + (data[2] << 6) + data[3], data[4:]
+    return (
+        (data[2] << 30)
+        + (data[3] << 24)
+        + (data[4] << 18)
+        + (data[5] << 12)
+        + (data[6] << 6)
+        + data[7],
+        data[8:],
+    )
+
+
+def n_to_data(n):
+    """Convert an integer to one-, four- or eight-unit graph6 sequence.
+
+    This function is undefined if `n` is not in ``range(2 ** 36)``.
+
+    """
+    if n <= 62:
+        return [n]
+    elif n <= 258047:
+        return [63, (n >> 12) & 0x3F, (n >> 6) & 0x3F, n & 0x3F]
+    else:  # if n <= 68719476735:
+        return [
+            63,
+            63,
+            (n >> 30) & 0x3F,
+            (n >> 24) & 0x3F,
+            (n >> 18) & 0x3F,
+            (n >> 12) & 0x3F,
+            (n >> 6) & 0x3F,
+            n & 0x3F,
+        ]
diff --git a/phivenv/Lib/site-packages/networkx/readwrite/graphml.py b/phivenv/Lib/site-packages/networkx/readwrite/graphml.py
new file mode 100644
index 0000000000000000000000000000000000000000..db696ce47a24a97ab2f0053508b7290ffa97c82a
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/readwrite/graphml.py
@@ -0,0 +1,1051 @@
+"""
+*******
+GraphML
+*******
+Read and write graphs in GraphML format.
+
+.. warning::
+
+    This parser uses the standard xml library present in Python, which is
+    insecure - see :external+python:mod:`xml` for additional information.
+    Only parse GraphML files you trust.
+
+This implementation does not support mixed graphs (directed and undirected
+edges together), hyperedges, nested graphs, or ports.
+
+"GraphML is a comprehensive and easy-to-use file format for graphs. It
+consists of a language core to describe the structural properties of a
+graph and a flexible extension mechanism to add application-specific
+data. Its main features include support of
+
+    * directed, undirected, and mixed graphs,
+    * hypergraphs,
+    * hierarchical graphs,
+    * graphical representations,
+    * references to external data,
+    * application-specific attribute data, and
+    * light-weight parsers.
+
+Unlike many other file formats for graphs, GraphML does not use a
+custom syntax. Instead, it is based on XML and hence ideally suited as
+a common denominator for all kinds of services generating, archiving,
+or processing graphs."
+
+http://graphml.graphdrawing.org/
+
+Format
+------
+GraphML is an XML format. See
+http://graphml.graphdrawing.org/specification.html for the specification and
+http://graphml.graphdrawing.org/primer/graphml-primer.html
+for examples.
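To make the format concrete, here is a small sketch (not part of the vendored file) that feeds a minimal GraphML document to the reader this module defines, via the top-level `nx.parse_graphml`; the node ids `a` and `b` are illustrative:

```python
import networkx as nx

# A minimal GraphML document: one undirected graph, two nodes, one edge.
doc = """<graphml xmlns="http://graphml.graphdrawing.org/xmlns">
  <graph edgedefault="undirected">
    <node id="a"/>
    <node id="b"/>
    <edge source="a" target="b"/>
  </graph>
</graphml>"""

G = nx.parse_graphml(doc)
assert list(G.edges()) == [("a", "b")]
```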
+""" +import warnings +from collections import defaultdict + +import networkx as nx +from networkx.utils import open_file + +__all__ = [ + "write_graphml", + "read_graphml", + "generate_graphml", + "write_graphml_xml", + "write_graphml_lxml", + "parse_graphml", + "GraphMLWriter", + "GraphMLReader", +] + + +@open_file(1, mode="wb") +def write_graphml_xml( + G, + path, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, +): + """Write G in GraphML XML format to path + + Parameters + ---------- + G : graph + A networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + infer_numeric_types : boolean + Determine if numeric types should be generalized. + For example, if edges have both int and float 'weight' attributes, + we infer in GraphML that both are floats. + named_key_ids : bool (optional) + If True use attr.name as value for key elements' id attribute. + edge_id_from_attribute : dict key (optional) + If provided, the graphml edge id is set by looking up the corresponding + edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data, + the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_graphml(G, "test.graphml") + + Notes + ----- + This implementation does not support mixed graphs (directed + and unidirected edges together) hyperedges, nested graphs, or ports. + """ + writer = GraphMLWriter( + encoding=encoding, + prettyprint=prettyprint, + infer_numeric_types=infer_numeric_types, + named_key_ids=named_key_ids, + edge_id_from_attribute=edge_id_from_attribute, + ) + writer.add_graph_element(G) + writer.dump(path) + + +@open_file(1, mode="wb") +def write_graphml_lxml( + G, + path, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, +): + """Write G in GraphML XML format to path + + This function uses the LXML framework and should be faster than + the version using the xml library. + + Parameters + ---------- + G : graph + A networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + infer_numeric_types : boolean + Determine if numeric types should be generalized. + For example, if edges have both int and float 'weight' attributes, + we infer in GraphML that both are floats. + named_key_ids : bool (optional) + If True use attr.name as value for key elements' id attribute. + edge_id_from_attribute : dict key (optional) + If provided, the graphml edge id is set by looking up the corresponding + edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data, + the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_graphml_lxml(G, "fourpath.graphml") + + Notes + ----- + This implementation does not support mixed graphs (directed + and unidirected edges together) hyperedges, nested graphs, or ports. 
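A usage sketch for the writers above, assuming only networkx and the standard library. Both `write_graphml` and `read_graphml` accept file-like objects, so an in-memory round-trip works; the final assertion reflects the int/float generalization that `infer_numeric_types` enables:

```python
import io

import networkx as nx

# Round-trip GraphML through an in-memory buffer.
G = nx.path_graph(3)
buf = io.BytesIO()
nx.write_graphml(G, buf)  # lxml-backed when lxml is installed
buf.seek(0)
H = nx.read_graphml(buf, node_type=int)  # convert node ids back to int
assert sorted(H.edges()) == sorted(G.edges())

# Mixed int/float values under one attribute name are generalized
# to the wider GraphML type when infer_numeric_types=True.
G.add_edge(0, 1, weight=1)
G.add_edge(1, 2, weight=2.5)
buf = io.BytesIO()
nx.write_graphml(G, buf, infer_numeric_types=True)
assert b'attr.type="double"' in buf.getvalue()
```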
+ """ + try: + import lxml.etree as lxmletree + except ImportError: + return write_graphml_xml( + G, + path, + encoding, + prettyprint, + infer_numeric_types, + named_key_ids, + edge_id_from_attribute, + ) + + writer = GraphMLWriterLxml( + path, + graph=G, + encoding=encoding, + prettyprint=prettyprint, + infer_numeric_types=infer_numeric_types, + named_key_ids=named_key_ids, + edge_id_from_attribute=edge_id_from_attribute, + ) + writer.dump() + + +def generate_graphml( + G, + encoding="utf-8", + prettyprint=True, + named_key_ids=False, + edge_id_from_attribute=None, +): + """Generate GraphML lines for G + + Parameters + ---------- + G : graph + A networkx graph + encoding : string (optional) + Encoding for text data. + prettyprint : bool (optional) + If True use line breaks and indenting in output XML. + named_key_ids : bool (optional) + If True use attr.name as value for key elements' id attribute. + edge_id_from_attribute : dict key (optional) + If provided, the graphml edge id is set by looking up the corresponding + edge data attribute keyed by this parameter. If `None` or the key does not exist in edge data, + the edge id is set by the edge key if `G` is a MultiGraph, else the edge id is left unset. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> linefeed = chr(10) # linefeed = \n + >>> s = linefeed.join(nx.generate_graphml(G)) + >>> for line in nx.generate_graphml(G): # doctest: +SKIP + ... print(line) + + Notes + ----- + This implementation does not support mixed graphs (directed and unidirected + edges together) hyperedges, nested graphs, or ports. + """ + writer = GraphMLWriter( + encoding=encoding, + prettyprint=prettyprint, + named_key_ids=named_key_ids, + edge_id_from_attribute=edge_id_from_attribute, + ) + writer.add_graph_element(G) + yield from str(writer).splitlines() + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False): + """Read graph in GraphML format from path. + + Parameters + ---------- + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + + node_type: Python type (default: str) + Convert node ids to this type + + edge_key_type: Python type (default: int) + Convert graphml edge ids to this type. Multigraphs use id as edge key. + Non-multigraphs add to edge attribute dict with name "id". + + force_multigraph : bool (default: False) + If True, return a multigraph with edge keys. If False (the default) + return a multigraph when multiedges are in the graph. + + Returns + ------- + graph: NetworkX graph + If parallel edges are present or `force_multigraph=True` then + a MultiGraph or MultiDiGraph is returned. Otherwise a Graph/DiGraph. + The returned graph is directed if the file indicates it should be. + + Notes + ----- + Default node and edge attributes are not propagated to each node and edge. + They can be obtained from `G.graph` and applied to node and edge attributes + if desired using something like this: + + >>> default_color = G.graph["node_default"]["color"] # doctest: +SKIP + >>> for node, data in G.nodes(data=True): # doctest: +SKIP + ... if "color" not in data: + ... data["color"] = default_color + >>> default_color = G.graph["edge_default"]["color"] # doctest: +SKIP + >>> for u, v, data in G.edges(data=True): # doctest: +SKIP + ... if "color" not in data: + ... 
data["color"] = default_color
+
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hypergraphs, nested graphs, or ports.
+
+    For multigraphs the GraphML edge "id" will be used as the edge
+    key. If not specified then the "key" attribute will be used. If
+    there is no "key" attribute a default NetworkX multigraph edge key
+    will be provided.
+
+    Files with the yEd "yfiles" extension can be read. The type of the node's
+    shape is preserved in the `shape_type` node attribute.
+
+    yEd compressed files ("file.graphmlz" extension) can be read by renaming
+    the file to "file.graphml.gz".
+
+    """
+    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
+    # need to check for multiple graphs
+    glist = list(reader(path=path))
+    if len(glist) == 0:
+        # If no graph comes back, try looking for an incomplete header
+        header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
+        path.seek(0)
+        old_bytes = path.read()
+        new_bytes = old_bytes.replace(b"<graphml>", header)
+        glist = list(reader(string=new_bytes))
+        if len(glist) == 0:
+            raise nx.NetworkXError("file not successfully read as graphml")
+    return glist[0]
+
+
+@nx._dispatch(graphs=None)
+def parse_graphml(
+    graphml_string, node_type=str, edge_key_type=int, force_multigraph=False
+):
+    """Read graph in GraphML format from string.
+
+    Parameters
+    ----------
+    graphml_string : string
+       String containing graphml information
+       (e.g., contents of a graphml file).
+
+    node_type: Python type (default: str)
+       Convert node ids to this type
+
+    edge_key_type: Python type (default: int)
+       Convert graphml edge ids to this type. Multigraphs use id as edge key.
+       Non-multigraphs add to edge attribute dict with name "id".
+
+    force_multigraph : bool (default: False)
+       If True, return a multigraph with edge keys. If False (the default)
+       return a multigraph when multiedges are in the graph.
+
+    Returns
+    -------
+    graph: NetworkX graph
+        If no parallel edges are found a Graph or DiGraph is returned.
+        Otherwise a MultiGraph or MultiDiGraph is returned.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> linefeed = chr(10)  # linefeed = \n
+    >>> s = linefeed.join(nx.generate_graphml(G))
+    >>> H = nx.parse_graphml(s)
+
+    Notes
+    -----
+    Default node and edge attributes are not propagated to each node and edge.
+    They can be obtained from `G.graph` and applied to node and edge attributes
+    if desired using something like this:
+
+    >>> default_color = G.graph["node_default"]["color"]  # doctest: +SKIP
+    >>> for node, data in G.nodes(data=True):  # doctest: +SKIP
+    ...     if "color" not in data:
+    ...         data["color"] = default_color
+    >>> default_color = G.graph["edge_default"]["color"]  # doctest: +SKIP
+    >>> for u, v, data in G.edges(data=True):  # doctest: +SKIP
+    ...     if "color" not in data:
+    ...         data["color"] = default_color
+
+    This implementation does not support mixed graphs (directed and undirected
+    edges together), hypergraphs, nested graphs, or ports.
+
+    For multigraphs the GraphML edge "id" will be used as the edge
+    key. If not specified then the "key" attribute will be used. If
+    there is no "key" attribute a default NetworkX multigraph edge key
+    will be provided.
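A minimal round-trip sketch for `parse_graphml` together with `generate_graphml`, mirroring the doctest above (assumes networkx is importable as `nx`):

```python
import networkx as nx

# String round-trip: generate_graphml yields lines, parse_graphml reads them.
G = nx.path_graph(4)
s = "\n".join(nx.generate_graphml(G))
H = nx.parse_graphml(s, node_type=int)
assert sorted(H.edges()) == sorted(G.edges())
assert not H.is_multigraph()  # no parallel edges, so a plain Graph is returned
```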
+
+    """
+    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
+    # need to check for multiple graphs
+    glist = list(reader(string=graphml_string))
+    if len(glist) == 0:
+        # If no graph comes back, try looking for an incomplete header
+        header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
+        new_string = graphml_string.replace("<graphml>", header)
+        glist = list(reader(string=new_string))
+        if len(glist) == 0:
+            raise nx.NetworkXError("file not successfully read as graphml")
+    return glist[0]
+
+
+class GraphML:
+    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
+    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
+    # xmlns:y="http://www.yworks.com/xml/graphml"
+    NS_Y = "http://www.yworks.com/xml/graphml"
+    SCHEMALOCATION = " ".join(
+        [
+            "http://graphml.graphdrawing.org/xmlns",
+            "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
+        ]
+    )
+
+    def construct_types(self):
+        types = [
+            (int, "integer"),  # for Gephi GraphML bug
+            (str, "yfiles"),
+            (str, "string"),
+            (int, "int"),
+            (int, "long"),
+            (float, "float"),
+            (float, "double"),
+            (bool, "boolean"),
+        ]
+
+        # These additions to types allow writing numpy types
+        try:
+            import numpy as np
+        except ImportError:
+            pass
+        else:
+            # prepend so that python types are created upon read (last entry wins)
+            types = [
+                (np.float64, "float"),
+                (np.float32, "float"),
+                (np.float16, "float"),
+                (np.int_, "int"),
+                (np.int8, "int"),
+                (np.int16, "int"),
+                (np.int32, "int"),
+                (np.int64, "int"),
+                (np.uint8, "int"),
+                (np.uint16, "int"),
+                (np.uint32, "int"),
+                (np.uint64, "int"),
+                (np.int_, "int"),
+                (np.intc, "int"),
+                (np.intp, "int"),
+            ] + types
+
+        self.xml_type = dict(types)
+        self.python_type = dict(reversed(a) for a in types)
+
+    # This page says that data types in GraphML follow Java(TM).
+    # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
+    # true and false are the only boolean literals:
+    # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
+    convert_bool = {
+        # We use data.lower() in actual use.
+        "true": True,
+        "false": False,
+        # Include integer strings for convenience.
+        "0": False,
+        0: False,
+        "1": True,
+        1: True,
+    }
+
+    def get_xml_type(self, key):
+        """Wrapper around the xml_type dict that raises a more informative
+        exception message when a user attempts to use data of a type not
+        supported by GraphML."""
+        try:
+            return self.xml_type[key]
+        except KeyError as err:
+            raise TypeError(
+                f"GraphML does not support type {type(key)} as data values."
+ ) from err + + +class GraphMLWriter(GraphML): + def __init__( + self, + graph=None, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, + ): + self.construct_types() + from xml.etree.ElementTree import Element + + self.myElement = Element + + self.infer_numeric_types = infer_numeric_types + self.prettyprint = prettyprint + self.named_key_ids = named_key_ids + self.edge_id_from_attribute = edge_id_from_attribute + self.encoding = encoding + self.xml = self.myElement( + "graphml", + { + "xmlns": self.NS_GRAPHML, + "xmlns:xsi": self.NS_XSI, + "xsi:schemaLocation": self.SCHEMALOCATION, + }, + ) + self.keys = {} + self.attributes = defaultdict(list) + self.attribute_types = defaultdict(set) + + if graph is not None: + self.add_graph_element(graph) + + def __str__(self): + from xml.etree.ElementTree import tostring + + if self.prettyprint: + self.indent(self.xml) + s = tostring(self.xml).decode(self.encoding) + return s + + def attr_type(self, name, scope, value): + """Infer the attribute type of data named name. Currently this only + supports inference of numeric types. + + If self.infer_numeric_types is false, type is used. Otherwise, pick the + most general of types found across all values with name and scope. This + means edges with data named 'weight' are treated separately from nodes + with data named 'weight'. + """ + if self.infer_numeric_types: + types = self.attribute_types[(name, scope)] + + if len(types) > 1: + types = {self.get_xml_type(t) for t in types} + if "string" in types: + return str + elif "float" in types or "double" in types: + return float + else: + return int + else: + return list(types)[0] + else: + return type(value) + + def get_key(self, name, attr_type, scope, default): + keys_key = (name, attr_type, scope) + try: + return self.keys[keys_key] + except KeyError: + if self.named_key_ids: + new_id = name + else: + new_id = f"d{len(list(self.keys))}" + + self.keys[keys_key] = new_id + key_kwargs = { + "id": new_id, + "for": scope, + "attr.name": name, + "attr.type": attr_type, + } + key_element = self.myElement("key", **key_kwargs) + # add subelement for data default value if present + if default is not None: + default_element = self.myElement("default") + default_element.text = str(default) + key_element.append(default_element) + self.xml.insert(0, key_element) + return new_id + + def add_data(self, name, element_type, value, scope="all", default=None): + """ + Make a data element for an edge or a node. Keep a log of the + type in the keys table. + """ + if element_type not in self.xml_type: + raise nx.NetworkXError( + f"GraphML writer does not support {element_type} as data values." + ) + keyid = self.get_key(name, self.get_xml_type(element_type), scope, default) + data_element = self.myElement("data", key=keyid) + data_element.text = str(value) + return data_element + + def add_attributes(self, scope, xml_obj, data, default): + """Appends attribute data to edges or nodes, and stores type information + to be added later. See add_graph_element. 
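For illustration, the writer class can also be driven directly; `str(writer)` serializes the accumulated document, which is what `generate_graphml` does internally. A hedged sketch:

```python
import networkx as nx
from networkx.readwrite.graphml import GraphMLWriter

# Build a writer, add a graph element, then serialize to a string.
writer = GraphMLWriter(prettyprint=True)
writer.add_graph_element(nx.path_graph(2))
xml_string = str(writer)
assert xml_string.startswith("<graphml")
```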
+ """ + for k, v in data.items(): + self.attribute_types[(str(k), scope)].add(type(v)) + self.attributes[xml_obj].append([k, v, scope, default.get(k)]) + + def add_nodes(self, G, graph_element): + default = G.graph.get("node_default", {}) + for node, data in G.nodes(data=True): + node_element = self.myElement("node", id=str(node)) + self.add_attributes("node", node_element, data, default) + graph_element.append(node_element) + + def add_edges(self, G, graph_element): + if G.is_multigraph(): + for u, v, key, data in G.edges(data=True, keys=True): + edge_element = self.myElement( + "edge", + source=str(u), + target=str(v), + id=str(data.get(self.edge_id_from_attribute)) + if self.edge_id_from_attribute + and self.edge_id_from_attribute in data + else str(key), + ) + default = G.graph.get("edge_default", {}) + self.add_attributes("edge", edge_element, data, default) + graph_element.append(edge_element) + else: + for u, v, data in G.edges(data=True): + if self.edge_id_from_attribute and self.edge_id_from_attribute in data: + # select attribute to be edge id + edge_element = self.myElement( + "edge", + source=str(u), + target=str(v), + id=str(data.get(self.edge_id_from_attribute)), + ) + else: + # default: no edge id + edge_element = self.myElement("edge", source=str(u), target=str(v)) + default = G.graph.get("edge_default", {}) + self.add_attributes("edge", edge_element, data, default) + graph_element.append(edge_element) + + def add_graph_element(self, G): + """ + Serialize graph G in GraphML to the stream. + """ + if G.is_directed(): + default_edge_type = "directed" + else: + default_edge_type = "undirected" + + graphid = G.graph.pop("id", None) + if graphid is None: + graph_element = self.myElement("graph", edgedefault=default_edge_type) + else: + graph_element = self.myElement( + "graph", edgedefault=default_edge_type, id=graphid + ) + default = {} + data = { + k: v + for (k, v) in G.graph.items() + if k not in ["node_default", "edge_default"] + } + self.add_attributes("graph", graph_element, data, default) + self.add_nodes(G, graph_element) + self.add_edges(G, graph_element) + + # self.attributes contains a mapping from XML Objects to a list of + # data that needs to be added to them. + # We postpone processing in order to do type inference/generalization. + # See self.attr_type + for xml_obj, data in self.attributes.items(): + for k, v, scope, default in data: + xml_obj.append( + self.add_data( + str(k), self.attr_type(k, scope, v), str(v), scope, default + ) + ) + self.xml.append(graph_element) + + def add_graphs(self, graph_list): + """Add many graphs to this GraphML document.""" + for G in graph_list: + self.add_graph_element(G) + + def dump(self, stream): + from xml.etree.ElementTree import ElementTree + + if self.prettyprint: + self.indent(self.xml) + document = ElementTree(self.xml) + document.write(stream, encoding=self.encoding, xml_declaration=True) + + def indent(self, elem, level=0): + # in-place prettyprint formatter + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + if not elem.tail or not elem.tail.strip(): + elem.tail = i + for elem in elem: + self.indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +class IncrementalElement: + """Wrapper for _IncrementalWriter providing an Element like interface. 
+ + This wrapper does not intend to be a complete implementation but rather to + deal with those calls used in GraphMLWriter. + """ + + def __init__(self, xml, prettyprint): + self.xml = xml + self.prettyprint = prettyprint + + def append(self, element): + self.xml.write(element, pretty_print=self.prettyprint) + + +class GraphMLWriterLxml(GraphMLWriter): + def __init__( + self, + path, + graph=None, + encoding="utf-8", + prettyprint=True, + infer_numeric_types=False, + named_key_ids=False, + edge_id_from_attribute=None, + ): + self.construct_types() + import lxml.etree as lxmletree + + self.myElement = lxmletree.Element + + self._encoding = encoding + self._prettyprint = prettyprint + self.named_key_ids = named_key_ids + self.edge_id_from_attribute = edge_id_from_attribute + self.infer_numeric_types = infer_numeric_types + + self._xml_base = lxmletree.xmlfile(path, encoding=encoding) + self._xml = self._xml_base.__enter__() + self._xml.write_declaration() + + # We need to have a xml variable that support insertion. This call is + # used for adding the keys to the document. + # We will store those keys in a plain list, and then after the graph + # element is closed we will add them to the main graphml element. + self.xml = [] + self._keys = self.xml + self._graphml = self._xml.element( + "graphml", + { + "xmlns": self.NS_GRAPHML, + "xmlns:xsi": self.NS_XSI, + "xsi:schemaLocation": self.SCHEMALOCATION, + }, + ) + self._graphml.__enter__() + self.keys = {} + self.attribute_types = defaultdict(set) + + if graph is not None: + self.add_graph_element(graph) + + def add_graph_element(self, G): + """ + Serialize graph G in GraphML to the stream. + """ + if G.is_directed(): + default_edge_type = "directed" + else: + default_edge_type = "undirected" + + graphid = G.graph.pop("id", None) + if graphid is None: + graph_element = self._xml.element("graph", edgedefault=default_edge_type) + else: + graph_element = self._xml.element( + "graph", edgedefault=default_edge_type, id=graphid + ) + + # gather attributes types for the whole graph + # to find the most general numeric format needed. + # Then pass through attributes to create key_id for each. 
+ graphdata = { + k: v + for k, v in G.graph.items() + if k not in ("node_default", "edge_default") + } + node_default = G.graph.get("node_default", {}) + edge_default = G.graph.get("edge_default", {}) + # Graph attributes + for k, v in graphdata.items(): + self.attribute_types[(str(k), "graph")].add(type(v)) + for k, v in graphdata.items(): + element_type = self.get_xml_type(self.attr_type(k, "graph", v)) + self.get_key(str(k), element_type, "graph", None) + # Nodes and data + for node, d in G.nodes(data=True): + for k, v in d.items(): + self.attribute_types[(str(k), "node")].add(type(v)) + for node, d in G.nodes(data=True): + for k, v in d.items(): + T = self.get_xml_type(self.attr_type(k, "node", v)) + self.get_key(str(k), T, "node", node_default.get(k)) + # Edges and data + if G.is_multigraph(): + for u, v, ekey, d in G.edges(keys=True, data=True): + for k, v in d.items(): + self.attribute_types[(str(k), "edge")].add(type(v)) + for u, v, ekey, d in G.edges(keys=True, data=True): + for k, v in d.items(): + T = self.get_xml_type(self.attr_type(k, "edge", v)) + self.get_key(str(k), T, "edge", edge_default.get(k)) + else: + for u, v, d in G.edges(data=True): + for k, v in d.items(): + self.attribute_types[(str(k), "edge")].add(type(v)) + for u, v, d in G.edges(data=True): + for k, v in d.items(): + T = self.get_xml_type(self.attr_type(k, "edge", v)) + self.get_key(str(k), T, "edge", edge_default.get(k)) + + # Now add attribute keys to the xml file + for key in self.xml: + self._xml.write(key, pretty_print=self._prettyprint) + + # The incremental_writer writes each node/edge as it is created + incremental_writer = IncrementalElement(self._xml, self._prettyprint) + with graph_element: + self.add_attributes("graph", incremental_writer, graphdata, {}) + self.add_nodes(G, incremental_writer) # adds attributes too + self.add_edges(G, incremental_writer) # adds attributes too + + def add_attributes(self, scope, xml_obj, data, default): + """Appends attribute data.""" + for k, v in data.items(): + data_element = self.add_data( + str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k) + ) + xml_obj.append(data_element) + + def __str__(self): + return object.__str__(self) + + def dump(self): + self._graphml.__exit__(None, None, None) + self._xml_base.__exit__(None, None, None) + + +# default is lxml is present. +write_graphml = write_graphml_lxml + + +class GraphMLReader(GraphML): + """Read a GraphML document. 
Produces NetworkX graph objects.""" + + def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False): + self.construct_types() + self.node_type = node_type + self.edge_key_type = edge_key_type + self.multigraph = force_multigraph # If False, test for multiedges + self.edge_ids = {} # dict mapping (u,v) tuples to edge id attributes + + def __call__(self, path=None, string=None): + from xml.etree.ElementTree import ElementTree, fromstring + + if path is not None: + self.xml = ElementTree(file=path) + elif string is not None: + self.xml = fromstring(string) + else: + raise ValueError("Must specify either 'path' or 'string' as kwarg") + (keys, defaults) = self.find_graphml_keys(self.xml) + for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"): + yield self.make_graph(g, keys, defaults) + + def make_graph(self, graph_xml, graphml_keys, defaults, G=None): + # set default graph type + edgedefault = graph_xml.get("edgedefault", None) + if G is None: + if edgedefault == "directed": + G = nx.MultiDiGraph() + else: + G = nx.MultiGraph() + # set defaults for graph attributes + G.graph["node_default"] = {} + G.graph["edge_default"] = {} + for key_id, value in defaults.items(): + key_for = graphml_keys[key_id]["for"] + name = graphml_keys[key_id]["name"] + python_type = graphml_keys[key_id]["type"] + if key_for == "node": + G.graph["node_default"].update({name: python_type(value)}) + if key_for == "edge": + G.graph["edge_default"].update({name: python_type(value)}) + # hyperedges are not supported + hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge") + if hyperedge is not None: + raise nx.NetworkXError("GraphML reader doesn't support hyperedges") + # add nodes + for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"): + self.add_node(G, node_xml, graphml_keys, defaults) + # add edges + for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"): + self.add_edge(G, edge_xml, graphml_keys) + # add graph data + data = self.decode_data_elements(graphml_keys, graph_xml) + G.graph.update(data) + + # switch to Graph or DiGraph if no parallel edges were found + if self.multigraph: + return G + + G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G) + # add explicit edge "id" from file as attribute in NX graph. + nx.set_edge_attributes(G, values=self.edge_ids, name="id") + return G + + def add_node(self, G, node_xml, graphml_keys, defaults): + """Add a node to the graph.""" + # warn on finding unsupported ports tag + ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port") + if ports is not None: + warnings.warn("GraphML port tag not supported.") + # find the node by id and cast it to the appropriate type + node_id = self.node_type(node_xml.get("id")) + # get data/attributes for node + data = self.decode_data_elements(graphml_keys, node_xml) + G.add_node(node_id, **data) + # get child nodes + if node_xml.attrib.get("yfiles.foldertype") == "group": + graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph") + self.make_graph(graph_xml, graphml_keys, defaults, G) + + def add_edge(self, G, edge_element, graphml_keys): + """Add an edge to the graph.""" + # warn on finding unsupported ports tag + ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port") + if ports is not None: + warnings.warn("GraphML port tag not supported.") + + # raise error if we find mixed directed and undirected edges + directed = edge_element.get("directed") + if G.is_directed() and directed == "false": + msg = "directed=false edge found in directed graph." 
+ raise nx.NetworkXError(msg) + if (not G.is_directed()) and directed == "true": + msg = "directed=true edge found in undirected graph." + raise nx.NetworkXError(msg) + + source = self.node_type(edge_element.get("source")) + target = self.node_type(edge_element.get("target")) + data = self.decode_data_elements(graphml_keys, edge_element) + # GraphML stores edge ids as an attribute + # NetworkX uses them as keys in multigraphs too if no key + # attribute is specified + edge_id = edge_element.get("id") + if edge_id: + # self.edge_ids is used by `make_graph` method for non-multigraphs + self.edge_ids[source, target] = edge_id + try: + edge_id = self.edge_key_type(edge_id) + except ValueError: # Could not convert. + pass + else: + edge_id = data.get("key") + + if G.has_edge(source, target): + # mark this as a multigraph + self.multigraph = True + + # Use add_edges_from to avoid error with add_edge when `'key' in data` + # Note there is only one edge here... + G.add_edges_from([(source, target, edge_id, data)]) + + def decode_data_elements(self, graphml_keys, obj_xml): + """Use the key information to decode the data XML if present.""" + data = {} + for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"): + key = data_element.get("key") + try: + data_name = graphml_keys[key]["name"] + data_type = graphml_keys[key]["type"] + except KeyError as err: + raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from err + text = data_element.text + # assume anything with subelements is a yfiles extension + if text is not None and len(list(data_element)) == 0: + if data_type == bool: + # Ignore cases. + # http://docs.oracle.com/javase/6/docs/api/java/lang/ + # Boolean.html#parseBoolean%28java.lang.String%29 + data[data_name] = self.convert_bool[text.lower()] + else: + data[data_name] = data_type(text) + elif len(list(data_element)) > 0: + # Assume yfiles as subelements, try to extract node_label + node_label = None + # set GenericNode's configuration as shape type + gn = data_element.find(f"{{{self.NS_Y}}}GenericNode") + if gn: + data["shape_type"] = gn.get("configuration") + for node_type in ["GenericNode", "ShapeNode", "SVGNode", "ImageNode"]: + pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}" + geometry = data_element.find(f"{pref}Geometry") + if geometry is not None: + data["x"] = geometry.get("x") + data["y"] = geometry.get("y") + if node_label is None: + node_label = data_element.find(f"{pref}NodeLabel") + shape = data_element.find(f"{pref}Shape") + if shape is not None: + data["shape_type"] = shape.get("type") + if node_label is not None: + data["label"] = node_label.text + + # check all the different types of edges available in yEd. + for edge_type in [ + "PolyLineEdge", + "SplineEdge", + "QuadCurveEdge", + "BezierEdge", + "ArcEdge", + ]: + pref = f"{{{self.NS_Y}}}{edge_type}/{{{self.NS_Y}}}" + edge_label = data_element.find(f"{pref}EdgeLabel") + if edge_label is not None: + break + + if edge_label is not None: + data["label"] = edge_label.text + return data + + def find_graphml_keys(self, graph_element): + """Extracts all the keys and key defaults from the xml.""" + graphml_keys = {} + graphml_key_defaults = {} + for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"): + attr_id = k.get("id") + attr_type = k.get("attr.type") + attr_name = k.get("attr.name") + yfiles_type = k.get("yfiles.type") + if yfiles_type is not None: + attr_name = yfiles_type + attr_type = "yfiles" + if attr_type is None: + attr_type = "string" + warnings.warn(f"No key type for id {attr_id}. 
Using string") + if attr_name is None: + raise nx.NetworkXError(f"Unknown key for id {attr_id}.") + graphml_keys[attr_id] = { + "name": attr_name, + "type": self.python_type[attr_type], + "for": k.get("for"), + } + # check for "default" sub-element of key element + default = k.find(f"{{{self.NS_GRAPHML}}}default") + if default is not None: + # Handle default values identically to data element values + python_type = graphml_keys[attr_id]["type"] + if python_type == bool: + graphml_key_defaults[attr_id] = self.convert_bool[ + default.text.lower() + ] + else: + graphml_key_defaults[attr_id] = python_type(default.text) + return graphml_keys, graphml_key_defaults diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__init__.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ee9d1240efd4691fe52d91690e99428ac75e11c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__init__.py @@ -0,0 +1,18 @@ +""" +********* +JSON data +********* +Generate and parse JSON serializable data for NetworkX graphs. + +These formats are suitable for use with the d3.js examples https://d3js.org/ + +The three formats that you can generate with NetworkX are: + + - node-link like in the d3.js example https://bl.ocks.org/mbostock/4062045 + - tree like in the d3.js example https://bl.ocks.org/mbostock/4063550 + - adjacency like in the d3.js example https://bost.ocks.org/mike/miserables/ +""" +from networkx.readwrite.json_graph.node_link import * +from networkx.readwrite.json_graph.adjacency import * +from networkx.readwrite.json_graph.tree import * +from networkx.readwrite.json_graph.cytoscape import * diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5268e9705365617048cf9e2f37f823ed4e147f02 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b4c5c57d6a8976eb2eccd69a6ff07ddd82944ed Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/adjacency.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fd02bc54ad729ece44f937c0d2a64ce44ab5b04 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebd3bb93ab379a28a5223136bf25e5c8d25951d3 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/node_link.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d57f409d83a97559f4fb23ebaadd9ba62b31088a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/__pycache__/tree.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/adjacency.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/adjacency.py new file mode 100644 index 0000000000000000000000000000000000000000..75695d3dff527f8834c0237d63c25ff0600cb8d7 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/adjacency.py @@ -0,0 +1,156 @@ +import networkx as nx + +__all__ = ["adjacency_data", "adjacency_graph"] + +_attrs = {"id": "id", "key": "key"} + + +def adjacency_data(G, attrs=_attrs): + """Returns data in adjacency format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + + attrs : dict + A dictionary that contains two keys 'id' and 'key'. The corresponding + values provide the attribute names for storing NetworkX-internal graph + data. The values should be unique. Default value: + :samp:`dict(id='id', key='key')`. + + If some user-defined graph data use these attribute names as data keys, + they may be silently dropped. + + Returns + ------- + data : dict + A dictionary with adjacency formatted data. + + Raises + ------ + NetworkXError + If values in attrs are not unique. + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.Graph([(1, 2)]) + >>> data = json_graph.adjacency_data(G) + + To serialize with json + + >>> import json + >>> s = json.dumps(data) + + Notes + ----- + Graph, node, and link attributes will be written when using this format + but attribute keys must be strings if you want to serialize the resulting + data with JSON. + + The default value of attrs will be changed in a future release of NetworkX. + + See Also + -------- + adjacency_graph, node_link_data, tree_data + """ + multigraph = G.is_multigraph() + id_ = attrs["id"] + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. + key = None if not multigraph else attrs["key"] + if id_ == key: + raise nx.NetworkXError("Attribute names are not unique.") + data = {} + data["directed"] = G.is_directed() + data["multigraph"] = multigraph + data["graph"] = list(G.graph.items()) + data["nodes"] = [] + data["adjacency"] = [] + for n, nbrdict in G.adjacency(): + data["nodes"].append({**G.nodes[n], id_: n}) + adj = [] + if multigraph: + for nbr, keys in nbrdict.items(): + for k, d in keys.items(): + adj.append({**d, id_: nbr, key: k}) + else: + for nbr, d in nbrdict.items(): + adj.append({**d, id_: nbr}) + data["adjacency"].append(adj) + return data + + +@nx._dispatch(graphs=None) +def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs): + """Returns graph from adjacency data format. + + Parameters + ---------- + data : dict + Adjacency list formatted graph data + + directed : bool + If True, and direction not specified in data, return a directed graph. + + multigraph : bool + If True, and multigraph not specified in data, return a multigraph. + + attrs : dict + A dictionary that contains two keys 'id' and 'key'. The corresponding + values provide the attribute names for storing NetworkX-internal graph + data. The values should be unique. 
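A round-trip sketch for the adjacency helpers above (assumes networkx is installed; the graph here is illustrative):

```python
import json

import networkx as nx
from networkx.readwrite import json_graph

# JSON round-trip through the adjacency format.
G = nx.Graph([(1, 2), (2, 3)])
payload = json.dumps(json_graph.adjacency_data(G))
H = json_graph.adjacency_graph(json.loads(payload))
assert sorted(H.edges()) == sorted(G.edges())
```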
Default value: + :samp:`dict(id='id', key='key')`. + + Returns + ------- + G : NetworkX graph + A NetworkX graph object + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.Graph([(1, 2)]) + >>> data = json_graph.adjacency_data(G) + >>> H = json_graph.adjacency_graph(data) + + Notes + ----- + The default value of attrs will be changed in a future release of NetworkX. + + See Also + -------- + adjacency_graph, node_link_data, tree_data + """ + multigraph = data.get("multigraph", multigraph) + directed = data.get("directed", directed) + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + id_ = attrs["id"] + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. + key = None if not multigraph else attrs["key"] + graph.graph = dict(data.get("graph", [])) + mapping = [] + for d in data["nodes"]: + node_data = d.copy() + node = node_data.pop(id_) + mapping.append(node) + graph.add_node(node) + graph.nodes[node].update(node_data) + for i, d in enumerate(data["adjacency"]): + source = mapping[i] + for tdata in d: + target_data = tdata.copy() + target = target_data.pop(id_) + if not multigraph: + graph.add_edge(source, target) + graph[source][target].update(target_data) + else: + ky = target_data.pop(key, None) + graph.add_edge(source, target, key=ky) + graph[source][target][ky].update(target_data) + return graph diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/cytoscape.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/cytoscape.py new file mode 100644 index 0000000000000000000000000000000000000000..7689192d471206d5ff9e01be91b09f2e55cfe1ea --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/cytoscape.py @@ -0,0 +1,174 @@ +import networkx as nx + +__all__ = ["cytoscape_data", "cytoscape_graph"] + + +def cytoscape_data(G, name="name", ident="id"): + """Returns data in Cytoscape JSON format (cyjs). + + Parameters + ---------- + G : NetworkX Graph + The graph to convert to cytoscape format + name : string + A string which is mapped to the 'name' node element in cyjs format. + Must not have the same value as `ident`. + ident : string + A string which is mapped to the 'id' node element in cyjs format. + Must not have the same value as `name`. + + Returns + ------- + data: dict + A dictionary with cyjs formatted data. + + Raises + ------ + NetworkXError + If the values for `name` and `ident` are identical. + + See Also + -------- + cytoscape_graph: convert a dictionary in cyjs format to a graph + + References + ---------- + .. 
[1] Cytoscape user's manual: + http://manual.cytoscape.org/en/stable/index.html + + Examples + -------- + >>> G = nx.path_graph(2) + >>> nx.cytoscape_data(G) # doctest: +SKIP + {'data': [], + 'directed': False, + 'multigraph': False, + 'elements': {'nodes': [{'data': {'id': '0', 'value': 0, 'name': '0'}}, + {'data': {'id': '1', 'value': 1, 'name': '1'}}], + 'edges': [{'data': {'source': 0, 'target': 1}}]}} + """ + if name == ident: + raise nx.NetworkXError("name and ident must be different.") + + jsondata = {"data": list(G.graph.items())} + jsondata["directed"] = G.is_directed() + jsondata["multigraph"] = G.is_multigraph() + jsondata["elements"] = {"nodes": [], "edges": []} + nodes = jsondata["elements"]["nodes"] + edges = jsondata["elements"]["edges"] + + for i, j in G.nodes.items(): + n = {"data": j.copy()} + n["data"]["id"] = j.get(ident) or str(i) + n["data"]["value"] = i + n["data"]["name"] = j.get(name) or str(i) + nodes.append(n) + + if G.is_multigraph(): + for e in G.edges(keys=True): + n = {"data": G.adj[e[0]][e[1]][e[2]].copy()} + n["data"]["source"] = e[0] + n["data"]["target"] = e[1] + n["data"]["key"] = e[2] + edges.append(n) + else: + for e in G.edges(): + n = {"data": G.adj[e[0]][e[1]].copy()} + n["data"]["source"] = e[0] + n["data"]["target"] = e[1] + edges.append(n) + return jsondata + + +@nx._dispatch(graphs=None) +def cytoscape_graph(data, name="name", ident="id"): + """ + Create a NetworkX graph from a dictionary in cytoscape JSON format. + + Parameters + ---------- + data : dict + A dictionary of data conforming to cytoscape JSON format. + name : string + A string which is mapped to the 'name' node element in cyjs format. + Must not have the same value as `ident`. + ident : string + A string which is mapped to the 'id' node element in cyjs format. + Must not have the same value as `name`. + + Returns + ------- + graph : a NetworkX graph instance + The `graph` can be an instance of `Graph`, `DiGraph`, `MultiGraph`, or + `MultiDiGraph` depending on the input data. + + Raises + ------ + NetworkXError + If the `name` and `ident` attributes are identical. + + See Also + -------- + cytoscape_data: convert a NetworkX graph to a dict in cyjs format + + References + ---------- + .. [1] Cytoscape user's manual: + http://manual.cytoscape.org/en/stable/index.html + + Examples + -------- + >>> data_dict = { + ... 'data': [], + ... 'directed': False, + ... 'multigraph': False, + ... 'elements': {'nodes': [{'data': {'id': '0', 'value': 0, 'name': '0'}}, + ... {'data': {'id': '1', 'value': 1, 'name': '1'}}], + ... 'edges': [{'data': {'source': 0, 'target': 1}}]} + ... 
} + >>> G = nx.cytoscape_graph(data_dict) + >>> G.name + '' + >>> G.nodes() + NodeView((0, 1)) + >>> G.nodes(data=True)[0] + {'id': '0', 'value': 0, 'name': '0'} + >>> G.edges(data=True) + EdgeDataView([(0, 1, {'source': 0, 'target': 1})]) + """ + if name == ident: + raise nx.NetworkXError("name and ident must be different.") + + multigraph = data.get("multigraph") + directed = data.get("directed") + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + graph.graph = dict(data.get("data")) + for d in data["elements"]["nodes"]: + node_data = d["data"].copy() + node = d["data"]["value"] + + if d["data"].get(name): + node_data[name] = d["data"].get(name) + if d["data"].get(ident): + node_data[ident] = d["data"].get(ident) + + graph.add_node(node) + graph.nodes[node].update(node_data) + + for d in data["elements"]["edges"]: + edge_data = d["data"].copy() + sour = d["data"]["source"] + targ = d["data"]["target"] + if multigraph: + key = d["data"].get("key", 0) + graph.add_edge(sour, targ, key=key) + graph.edges[sour, targ, key].update(edge_data) + else: + graph.add_edge(sour, targ) + graph.edges[sour, targ].update(edge_data) + return graph diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/node_link.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/node_link.py new file mode 100644 index 0000000000000000000000000000000000000000..0b6aa9319e08e541cc4dfaf75b2d2c20f048795d --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/node_link.py @@ -0,0 +1,244 @@ +from itertools import chain, count + +import networkx as nx + +__all__ = ["node_link_data", "node_link_graph"] + + +_attrs = { + "source": "source", + "target": "target", + "name": "id", + "key": "key", + "link": "links", +} + + +def _to_tuple(x): + """Converts lists to tuples, including nested lists. + + All other non-list inputs are passed through unmodified. This function is + intended to be used to convert potentially nested lists from json files + into valid nodes. + + Examples + -------- + >>> _to_tuple([1, 2, [3, 4]]) + (1, 2, (3, 4)) + """ + if not isinstance(x, (tuple, list)): + return x + return tuple(map(_to_tuple, x)) + + +def node_link_data( + G, + *, + source="source", + target="target", + name="id", + key="key", + link="links", +): + """Returns data in node-link format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + source : string + A string that provides the 'source' attribute name for storing NetworkX-internal graph data. + target : string + A string that provides the 'target' attribute name for storing NetworkX-internal graph data. + name : string + A string that provides the 'name' attribute name for storing NetworkX-internal graph data. + key : string + A string that provides the 'key' attribute name for storing NetworkX-internal graph data. + link : string + A string that provides the 'link' attribute name for storing NetworkX-internal graph data. + + Returns + ------- + data : dict + A dictionary with node-link formatted data. + + Raises + ------ + NetworkXError + If the values of 'source', 'target' and 'key' are not unique. 
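Before the node-link examples continue, a short round-trip sketch for the cytoscape helpers defined above:

```python
import networkx as nx

# Round-trip through the cyjs helpers.
G = nx.path_graph(2)
H = nx.cytoscape_graph(nx.cytoscape_data(G))
assert sorted(H.edges()) == sorted(G.edges())
assert H.nodes[0]["name"] == "0"  # 'name' and 'id' entries are synthesized
```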
+ + Examples + -------- + >>> G = nx.Graph([("A", "B")]) + >>> data1 = nx.node_link_data(G) + >>> data1 + {'directed': False, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 'A'}, {'id': 'B'}], 'links': [{'source': 'A', 'target': 'B'}]} + + To serialize with JSON + + >>> import json + >>> s1 = json.dumps(data1) + >>> s1 + '{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, {"id": "B"}], "links": [{"source": "A", "target": "B"}]}' + + A graph can also be serialized by passing `node_link_data` as an encoder function. The two methods are equivalent. + + >>> s1 = json.dumps(G, default=nx.node_link_data) + >>> s1 + '{"directed": false, "multigraph": false, "graph": {}, "nodes": [{"id": "A"}, {"id": "B"}], "links": [{"source": "A", "target": "B"}]}' + + The attribute names for storing NetworkX-internal graph data can + be specified as keyword options. + + >>> H = nx.gn_graph(2) + >>> data2 = nx.node_link_data(H, link="edges", source="from", target="to") + >>> data2 + {'directed': True, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 0}, {'id': 1}], 'edges': [{'from': 1, 'to': 0}]} + + Notes + ----- + Graph, node, and link attributes are stored in this format. Note that + attribute keys will be converted to strings in order to comply with JSON. + + Attribute 'key' is only used for multigraphs. + + To use `node_link_data` in conjunction with `node_link_graph`, + the keyword names for the attributes must match. + + + See Also + -------- + node_link_graph, adjacency_data, tree_data + """ + multigraph = G.is_multigraph() + + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. + key = None if not multigraph else key + if len({source, target, key}) < 3: + raise nx.NetworkXError("Attribute names are not unique.") + data = { + "directed": G.is_directed(), + "multigraph": multigraph, + "graph": G.graph, + "nodes": [{**G.nodes[n], name: n} for n in G], + } + if multigraph: + data[link] = [ + {**d, source: u, target: v, key: k} + for u, v, k, d in G.edges(keys=True, data=True) + ] + else: + data[link] = [{**d, source: u, target: v} for u, v, d in G.edges(data=True)] + return data + + +@nx._dispatch(graphs=None) +def node_link_graph( + data, + directed=False, + multigraph=True, + *, + source="source", + target="target", + name="id", + key="key", + link="links", +): + """Returns graph from node-link data format. + Useful for de-serialization from JSON. + + Parameters + ---------- + data : dict + node-link formatted graph data + + directed : bool + If True, and direction not specified in data, return a directed graph. + + multigraph : bool + If True, and multigraph not specified in data, return a multigraph. + + source : string + A string that provides the 'source' attribute name for storing NetworkX-internal graph data. + target : string + A string that provides the 'target' attribute name for storing NetworkX-internal graph data. + name : string + A string that provides the 'name' attribute name for storing NetworkX-internal graph data. + key : string + A string that provides the 'key' attribute name for storing NetworkX-internal graph data. + link : string + A string that provides the 'link' attribute name for storing NetworkX-internal graph data. + + Returns + ------- + G : NetworkX graph + A NetworkX graph object + + Examples + -------- + + Create data in node-link format by converting a graph. 
+ + >>> G = nx.Graph([('A', 'B')]) + >>> data = nx.node_link_data(G) + >>> data + {'directed': False, 'multigraph': False, 'graph': {}, 'nodes': [{'id': 'A'}, {'id': 'B'}], 'links': [{'source': 'A', 'target': 'B'}]} + + Revert data in node-link format to a graph. + + >>> H = nx.node_link_graph(data) + >>> print(H.edges) + [('A', 'B')] + + To serialize and deserialize a graph with JSON, + + >>> import json + >>> d = json.dumps(node_link_data(G)) + >>> H = node_link_graph(json.loads(d)) + >>> print(G.edges, H.edges) + [('A', 'B')] [('A', 'B')] + + + Notes + ----- + Attribute 'key' is only used for multigraphs. + + To use `node_link_data` in conjunction with `node_link_graph`, + the keyword names for the attributes must match. + + See Also + -------- + node_link_data, adjacency_data, tree_data + """ + multigraph = data.get("multigraph", multigraph) + directed = data.get("directed", directed) + if multigraph: + graph = nx.MultiGraph() + else: + graph = nx.Graph() + if directed: + graph = graph.to_directed() + + # Allow 'key' to be omitted from attrs if the graph is not a multigraph. + key = None if not multigraph else key + graph.graph = data.get("graph", {}) + c = count() + for d in data["nodes"]: + node = _to_tuple(d.get(name, next(c))) + nodedata = {str(k): v for k, v in d.items() if k != name} + graph.add_node(node, **nodedata) + for d in data[link]: + src = tuple(d[source]) if isinstance(d[source], list) else d[source] + tgt = tuple(d[target]) if isinstance(d[target], list) else d[target] + if not multigraph: + edgedata = {str(k): v for k, v in d.items() if k != source and k != target} + graph.add_edge(src, tgt, **edgedata) + else: + ky = d.get(key, None) + edgedata = { + str(k): v + for k, v in d.items() + if k != source and k != target and k != key + } + graph.add_edge(src, tgt, ky, **edgedata) + return graph diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__init__.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efa5adc2564ac42ab57081915f6cf14d9d5d33fb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21e1e748ddbb7c9e359ab1543ddcf1b56880b794 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..098e57f2466f420d21124f2a9b54c5a98516d66b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc4d06f35204f8da4d300f8fcdf2b35c847b46ec Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9286e3302df488d5f527d846363beb1bf254ce44 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py new file mode 100644 index 0000000000000000000000000000000000000000..37506382c55a110b26fdba32a268545d23f4474b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_adjacency.py @@ -0,0 +1,78 @@ +import copy +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import adjacency_data, adjacency_graph +from networkx.utils import graphs_equal + + +class TestAdjacency: + def test_graph(self): + G = nx.path_graph(4) + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + + def test_graph_attributes(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph["foo"] = "bar" + G.graph[1] = "one" + + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + d = json.dumps(adjacency_data(G)) + H = adjacency_graph(json.loads(d)) + assert graphs_equal(G, H) + assert H.graph["foo"] == "bar" + assert H.graph[1] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_digraph(self): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + H = adjacency_graph(adjacency_data(G)) + assert H.is_directed() + assert graphs_equal(G, H) + + def test_multidigraph(self): + G = nx.MultiDiGraph() + nx.add_path(G, [1, 2, 3]) + H = adjacency_graph(adjacency_data(G)) + assert H.is_directed() + assert H.is_multigraph() + assert graphs_equal(G, H) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = adjacency_graph(adjacency_data(G)) + assert graphs_equal(G, H) + assert H[1][2]["second"]["color"] == "blue" + + def test_input_data_is_not_modified_when_building_graph(self): + G = nx.path_graph(4) + input_data = adjacency_data(G) + orig_data = copy.deepcopy(input_data) + # Ensure input is unmodified by deserialisation + assert graphs_equal(G, adjacency_graph(input_data)) + assert input_data == orig_data + + def test_adjacency_form_json_serialisable(self): + G = nx.path_graph(4) + H = adjacency_graph(json.loads(json.dumps(adjacency_data(G)))) + assert graphs_equal(G, H) + + def test_exception(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + attrs = {"id": "node", "key": "node"} + adjacency_data(G, attrs) diff --git 
a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py new file mode 100644 index 0000000000000000000000000000000000000000..5d47f21f4217d1997165c4f19feb67d283d2dab2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_cytoscape.py @@ -0,0 +1,78 @@ +import copy +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import cytoscape_data, cytoscape_graph + + +def test_graph(): + G = nx.path_graph(4) + H = cytoscape_graph(cytoscape_data(G)) + assert nx.is_isomorphic(G, H) + + +def test_input_data_is_not_modified_when_building_graph(): + G = nx.path_graph(4) + input_data = cytoscape_data(G) + orig_data = copy.deepcopy(input_data) + # Ensure input is unmodified by cytoscape_graph (gh-4173) + cytoscape_graph(input_data) + assert input_data == orig_data + + +def test_graph_attributes(): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph["foo"] = "bar" + G.graph[1] = "one" + G.add_node(3, name="node", id="123") + + H = cytoscape_graph(cytoscape_data(G)) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + assert H.nodes[3]["name"] == "node" + assert H.nodes[3]["id"] == "123" + + d = json.dumps(cytoscape_data(G)) + H = cytoscape_graph(json.loads(d)) + assert H.graph["foo"] == "bar" + assert H.graph[1] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + assert H.nodes[3]["name"] == "node" + assert H.nodes[3]["id"] == "123" + + +def test_digraph(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + H = cytoscape_graph(cytoscape_data(G)) + assert H.is_directed() + assert nx.is_isomorphic(G, H) + + +def test_multidigraph(): + G = nx.MultiDiGraph() + nx.add_path(G, [1, 2, 3]) + H = cytoscape_graph(cytoscape_data(G)) + assert H.is_directed() + assert H.is_multigraph() + + +def test_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = cytoscape_graph(cytoscape_data(G)) + assert nx.is_isomorphic(G, H) + assert H[1][2]["second"]["color"] == "blue" + + +def test_exception(): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + cytoscape_data(G, name="foo", ident="foo") diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py new file mode 100644 index 0000000000000000000000000000000000000000..a432666b650a44013a7b5c385d516585e4d6cab3 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py @@ -0,0 +1,144 @@ +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import node_link_data, node_link_graph + + +class TestNodeLink: + # TODO: To be removed when signature change complete + def test_custom_attrs_dep(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + attrs = { + "source": "c_source", + "target": "c_target", + "name": "c_id", + "key": "c_key", + "link": "c_links", + } + + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + # provide only a partial dictionary of keywords. 
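+        # (keywords omitted from `attrs` keep their defaults, e.g. name="id", key="key")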
+ # This is similar to an example in the doc string + attrs = { + "link": "c_links", + "source": "c_source", + "target": "c_target", + } + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_exception_dep(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + node_link_data(G, name="node", source="node", target="node", key="node") + + def test_graph(self): + G = nx.path_graph(4) + H = node_link_graph(node_link_data(G)) + assert nx.is_isomorphic(G, H) + + def test_graph_attributes(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + H = node_link_graph(node_link_data(G)) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + d = json.dumps(node_link_data(G)) + H = node_link_graph(json.loads(d)) + assert H.graph["foo"] == "bar" + assert H.graph["1"] == "one" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 + + def test_digraph(self): + G = nx.DiGraph() + H = node_link_graph(node_link_data(G)) + assert H.is_directed() + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, key="first") + G.add_edge(1, 2, key="second", color="blue") + H = node_link_graph(node_link_data(G)) + assert nx.is_isomorphic(G, H) + assert H[1][2]["second"]["color"] == "blue" + + def test_graph_with_tuple_nodes(self): + G = nx.Graph() + G.add_edge((0, 0), (1, 0), color=[255, 255, 0]) + d = node_link_data(G) + dumped_d = json.dumps(d) + dd = json.loads(dumped_d) + H = node_link_graph(dd) + assert H.nodes[(0, 0)] == G.nodes[(0, 0)] + assert H[(0, 0)][(1, 0)]["color"] == [255, 255, 0] + + def test_unicode_keys(self): + q = "qualité" + G = nx.Graph() + G.add_node(1, **{q: q}) + s = node_link_data(G) + output = json.dumps(s, ensure_ascii=False) + data = json.loads(output) + H = node_link_graph(data) + assert H.nodes[1][q] == q + + def test_exception(self): + with pytest.raises(nx.NetworkXError): + G = nx.MultiDiGraph() + attrs = {"name": "node", "source": "node", "target": "node", "key": "node"} + node_link_data(G, **attrs) + + def test_string_ids(self): + q = "qualité" + G = nx.DiGraph() + G.add_node("A") + G.add_node(q) + G.add_edge("A", q) + data = node_link_data(G) + assert data["links"][0]["source"] == "A" + assert data["links"][0]["target"] == q + H = node_link_graph(data) + assert nx.is_isomorphic(G, H) + + def test_custom_attrs(self): + G = nx.path_graph(4) + G.add_node(1, color="red") + G.add_edge(1, 2, width=7) + G.graph[1] = "one" + G.graph["foo"] = "bar" + + attrs = { + "source": "c_source", + "target": "c_target", + "name": "c_id", + "key": "c_key", + "link": "c_links", + } + + H = node_link_graph(node_link_data(G, **attrs), multigraph=False, **attrs) + assert nx.is_isomorphic(G, H) + assert H.graph["foo"] == "bar" + assert H.nodes[1]["color"] == "red" + assert H[1][2]["width"] == 7 diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_tree.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..643a14d89b5211f2d97b98f2e227e68361781b97 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_tree.py @@ -0,0 +1,48 @@ +import json + +import pytest + +import networkx as nx +from networkx.readwrite.json_graph import 
tree_data, tree_graph + + +def test_graph(): + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3], color="red") + G.add_edge(1, 2, foo=7) + G.add_edge(1, 3, foo=10) + G.add_edge(3, 4, foo=10) + H = tree_graph(tree_data(G, 1)) + assert nx.is_isomorphic(G, H) + + +def test_graph_attributes(): + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3], color="red") + G.add_edge(1, 2, foo=7) + G.add_edge(1, 3, foo=10) + G.add_edge(3, 4, foo=10) + H = tree_graph(tree_data(G, 1)) + assert H.nodes[1]["color"] == "red" + + d = json.dumps(tree_data(G, 1)) + H = tree_graph(json.loads(d)) + assert H.nodes[1]["color"] == "red" + + +def test_exceptions(): + with pytest.raises(TypeError, match="is not a tree."): + G = nx.complete_graph(3) + tree_data(G, 0) + with pytest.raises(TypeError, match="is not directed."): + G = nx.path_graph(3) + tree_data(G, 0) + with pytest.raises(TypeError, match="is not weakly connected."): + G = nx.path_graph(3, create_using=nx.DiGraph) + G.add_edge(2, 0) + G.add_node(3) + tree_data(G, 0) + with pytest.raises(nx.NetworkXError, match="must be different."): + G = nx.MultiDiGraph() + G.add_node(0) + tree_data(G, 0, ident="node", children="node") diff --git a/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tree.py b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..c0b3af183786c79a0a8e1836467248ee80a1e153 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/json_graph/tree.py @@ -0,0 +1,137 @@ +from itertools import chain + +import networkx as nx + +__all__ = ["tree_data", "tree_graph"] + + +def tree_data(G, root, ident="id", children="children"): + """Returns data in tree format that is suitable for JSON serialization + and use in JavaScript documents. + + Parameters + ---------- + G : NetworkX graph + G must be an oriented tree + + root : node + The root of the tree + + ident : string + Attribute name for storing NetworkX-internal graph data. `ident` must + have a different value than `children`. The default is 'id'. + + children : string + Attribute name for storing NetworkX-internal graph data. `children` + must have a different value than `ident`. The default is 'children'. + + Returns + ------- + data : dict + A dictionary with node-link formatted data. + + Raises + ------ + NetworkXError + If `children` and `ident` attributes are identical. + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.DiGraph([(1, 2)]) + >>> data = json_graph.tree_data(G, root=1) + + To serialize with json + + >>> import json + >>> s = json.dumps(data) + + Notes + ----- + Node attributes are stored in this format but keys + for attributes must be strings if you want to serialize with JSON. + + Graph and edge attributes are not stored. 
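+
+    The dict can be converted back into a graph with `tree_graph` (a
+    minimal sketch, assuming the default `ident` and `children` keys):
+
+    >>> H = json_graph.tree_graph(data)
+    >>> list(H.edges)
+    [(1, 2)]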
+ + See Also + -------- + tree_graph, node_link_data, adjacency_data + """ + if G.number_of_nodes() != G.number_of_edges() + 1: + raise TypeError("G is not a tree.") + if not G.is_directed(): + raise TypeError("G is not directed.") + if not nx.is_weakly_connected(G): + raise TypeError("G is not weakly connected.") + + if ident == children: + raise nx.NetworkXError("The values for `id` and `children` must be different.") + + def add_children(n, G): + nbrs = G[n] + if len(nbrs) == 0: + return [] + children_ = [] + for child in nbrs: + d = {**G.nodes[child], ident: child} + c = add_children(child, G) + if c: + d[children] = c + children_.append(d) + return children_ + + return {**G.nodes[root], ident: root, children: add_children(root, G)} + + +@nx._dispatch(graphs=None) +def tree_graph(data, ident="id", children="children"): + """Returns graph from tree data format. + + Parameters + ---------- + data : dict + Tree formatted graph data + + ident : string + Attribute name for storing NetworkX-internal graph data. `ident` must + have a different value than `children`. The default is 'id'. + + children : string + Attribute name for storing NetworkX-internal graph data. `children` + must have a different value than `ident`. The default is 'children'. + + Returns + ------- + G : NetworkX DiGraph + + Examples + -------- + >>> from networkx.readwrite import json_graph + >>> G = nx.DiGraph([(1, 2)]) + >>> data = json_graph.tree_data(G, root=1) + >>> H = json_graph.tree_graph(data) + + See Also + -------- + tree_data, node_link_data, adjacency_data + """ + graph = nx.DiGraph() + + def add_children(parent, children_): + for data in children_: + child = data[ident] + graph.add_edge(parent, child) + grandchildren = data.get(children, []) + if grandchildren: + add_children(child, grandchildren) + nodedata = { + str(k): v for k, v in data.items() if k != ident and k != children + } + graph.add_node(child, **nodedata) + + root = data[ident] + children_ = data.get(children, []) + nodedata = {str(k): v for k, v in data.items() if k != ident and k != children} + graph.add_node(root, **nodedata) + add_children(root, children_) + return graph diff --git a/phivenv/Lib/site-packages/networkx/readwrite/leda.py b/phivenv/Lib/site-packages/networkx/readwrite/leda.py new file mode 100644 index 0000000000000000000000000000000000000000..735f2779b4a07187d3568165885436528220ffbb --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/leda.py @@ -0,0 +1,108 @@ +""" +Read graphs in LEDA format. + +LEDA is a C++ class library for efficient data types and algorithms. + +Format +------ +See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html + +""" +# Original author: D. Eppstein, UC Irvine, August 12, 2003. +# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain. + +__all__ = ["read_leda", "parse_leda"] + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import open_file + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_leda(path, encoding="UTF-8"): + """Read graph in LEDA format from path. + + Parameters + ---------- + path : file or string + File or filename to read. Filenames ending in .gz or .bz2 will be + uncompressed. + + Returns + ------- + G : NetworkX graph + + Examples + -------- + G=nx.read_leda('file.leda') + + References + ---------- + .. 
[1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html + """ + lines = (line.decode(encoding) for line in path) + G = parse_leda(lines) + return G + + +@nx._dispatch(graphs=None) +def parse_leda(lines): + """Read graph in LEDA format from string or iterable. + + Parameters + ---------- + lines : string or iterable + Data in LEDA format. + + Returns + ------- + G : NetworkX graph + + Examples + -------- + G=nx.parse_leda(string) + + References + ---------- + .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html + """ + if isinstance(lines, str): + lines = iter(lines.split("\n")) + lines = iter( + [ + line.rstrip("\n") + for line in lines + if not (line.startswith(("#", "\n")) or line == "") + ] + ) + for i in range(3): + next(lines) + # Graph + du = int(next(lines)) # -1=directed, -2=undirected + if du == -1: + G = nx.DiGraph() + else: + G = nx.Graph() + + # Nodes + n = int(next(lines)) # number of nodes + node = {} + for i in range(1, n + 1): # LEDA counts from 1 to n + symbol = next(lines).rstrip().strip("|{}| ") + if symbol == "": + symbol = str(i) # use int if no label - could be trouble + node[i] = symbol + + G.add_nodes_from([s for i, s in node.items()]) + + # Edges + m = int(next(lines)) # number of edges + for i in range(m): + try: + s, t, reversal, label = next(lines).split() + except BaseException as err: + raise NetworkXError(f"Too few fields in LEDA.GRAPH edge {i+1}") from err + # BEWARE: no handling of reversal edges + G.add_edge(node[int(s)], node[int(t)], label=label[2:-2]) + return G diff --git a/phivenv/Lib/site-packages/networkx/readwrite/multiline_adjlist.py b/phivenv/Lib/site-packages/networkx/readwrite/multiline_adjlist.py new file mode 100644 index 0000000000000000000000000000000000000000..aef6538515854e56be12a941b35c22d5b053cd46 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/multiline_adjlist.py @@ -0,0 +1,393 @@ +""" +************************* +Multi-line Adjacency List +************************* +Read and write NetworkX graphs as multi-line adjacency lists. + +The multi-line adjacency list format is useful for graphs with +nodes that can be meaningfully represented as strings. With this format +simple edge data can be stored but node or graph data is not. + +Format +------ +The first label in a line is the source node label followed by the node degree +d. The next d lines are target node labels and optional edge data. +That pattern repeats for all nodes in the graph. + +The graph with edges a-b, a-c, d-e can be represented as the following +adjacency list (anything following the # in a line is a comment):: + + # example.multiline-adjlist + a 2 + b + c + d 1 + e +""" + +__all__ = [ + "generate_multiline_adjlist", + "write_multiline_adjlist", + "parse_multiline_adjlist", + "read_multiline_adjlist", +] + +import networkx as nx +from networkx.utils import open_file + + +def generate_multiline_adjlist(G, delimiter=" "): + """Generate a single line of the graph G in multiline adjacency list format. + + Parameters + ---------- + G : NetworkX graph + + delimiter : string, optional + Separator for node labels + + Returns + ------- + lines : string + Lines of data in multiline adjlist format. + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> for line in nx.generate_multiline_adjlist(G): + ... 
print(line) + 0 3 + 1 {} + 2 {} + 3 {} + 1 2 + 2 {} + 3 {} + 2 1 + 3 {} + 3 1 + 4 {} + 4 1 + 5 {} + 5 1 + 6 {} + 6 0 + + See Also + -------- + write_multiline_adjlist, read_multiline_adjlist + """ + if G.is_directed(): + if G.is_multigraph(): + for s, nbrs in G.adjacency(): + nbr_edges = [ + (u, data) + for u, datadict in nbrs.items() + for key, data in datadict.items() + ] + deg = len(nbr_edges) + yield str(s) + delimiter + str(deg) + for u, d in nbr_edges: + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + else: # directed single edges + for s, nbrs in G.adjacency(): + deg = len(nbrs) + yield str(s) + delimiter + str(deg) + for u, d in nbrs.items(): + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + else: # undirected + if G.is_multigraph(): + seen = set() # helper dict used to avoid duplicate edges + for s, nbrs in G.adjacency(): + nbr_edges = [ + (u, data) + for u, datadict in nbrs.items() + if u not in seen + for key, data in datadict.items() + ] + deg = len(nbr_edges) + yield str(s) + delimiter + str(deg) + for u, d in nbr_edges: + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + seen.add(s) + else: # undirected single edges + seen = set() # helper dict used to avoid duplicate edges + for s, nbrs in G.adjacency(): + nbr_edges = [(u, d) for u, d in nbrs.items() if u not in seen] + deg = len(nbr_edges) + yield str(s) + delimiter + str(deg) + for u, d in nbr_edges: + if d is None: + yield str(u) + else: + yield str(u) + delimiter + str(d) + seen.add(s) + + +@open_file(1, mode="wb") +def write_multiline_adjlist(G, path, delimiter=" ", comments="#", encoding="utf-8"): + """Write the graph G in multiline adjacency list format to path + + Parameters + ---------- + G : NetworkX graph + + path : string or file + Filename or file handle to write to. + Filenames ending in .gz or .bz2 will be compressed. + + comments : string, optional + Marker for comment lines + + delimiter : string, optional + Separator for node labels + + encoding : string, optional + Text encoding. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_multiline_adjlist(G, "test.adjlist") + + The path can be a file handle or a string with the name of the file. If a + file handle is provided, it has to be opened in 'wb' mode. + + >>> fh = open("test.adjlist", "wb") + >>> nx.write_multiline_adjlist(G, fh) + + Filenames ending in .gz or .bz2 will be compressed. + + >>> nx.write_multiline_adjlist(G, "test.adjlist.gz") + + See Also + -------- + read_multiline_adjlist + """ + import sys + import time + + pargs = comments + " ".join(sys.argv) + header = ( + f"{pargs}\n" + + comments + + f" GMT {time.asctime(time.gmtime())}\n" + + comments + + f" {G.name}\n" + ) + path.write(header.encode(encoding)) + + for multiline in generate_multiline_adjlist(G, delimiter): + multiline += "\n" + path.write(multiline.encode(encoding)) + + +@nx._dispatch(graphs=None) +def parse_multiline_adjlist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, edgetype=None +): + """Parse lines of a multiline adjacency list representation of a graph. + + Parameters + ---------- + lines : list or iterator of strings + Input data in multiline adjlist format + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + nodetype : Python type, optional + Convert nodes to this type. + + edgetype : Python type, optional + Convert edges to this type. 
+
+    comments : string, optional
+        Marker for comment lines
+
+    delimiter : string, optional
+        Separator for node labels. The default is whitespace.
+
+    Returns
+    -------
+    G: NetworkX graph
+        The graph corresponding to the lines in multiline adjacency list format.
+
+    Examples
+    --------
+    >>> lines = [
+    ...     "1 2",
+    ...     "2 {'weight':3, 'name': 'Frodo'}",
+    ...     "3 {}",
+    ...     "2 1",
+    ...     "5 {'weight':6, 'name': 'Saruman'}",
+    ... ]
+    >>> G = nx.parse_multiline_adjlist(iter(lines), nodetype=int)
+    >>> list(G)
+    [1, 2, 3, 5]
+
+    """
+    from ast import literal_eval
+
+    G = nx.empty_graph(0, create_using)
+    for line in lines:
+        p = line.find(comments)
+        if p >= 0:
+            line = line[:p]
+        if not line:
+            continue
+        try:
+            (u, deg) = line.strip().split(delimiter)
+            deg = int(deg)
+        except BaseException as err:
+            raise TypeError(f"Failed to read node and degree on line ({line})") from err
+        if nodetype is not None:
+            try:
+                u = nodetype(u)
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert node ({u}) to " f"type {nodetype}"
+                ) from err
+        G.add_node(u)
+        for i in range(deg):
+            while True:
+                try:
+                    line = next(lines)
+                except StopIteration as err:
+                    msg = f"Failed to find neighbor for node ({u})"
+                    raise TypeError(msg) from err
+                p = line.find(comments)
+                if p >= 0:
+                    line = line[:p]
+                if line:
+                    break
+            vlist = line.strip().split(delimiter)
+            numb = len(vlist)
+            if numb < 1:
+                continue  # isolated node
+            v = vlist.pop(0)
+            data = "".join(vlist)
+            if nodetype is not None:
+                try:
+                    v = nodetype(v)
+                except BaseException as err:
+                    raise TypeError(
+                        f"Failed to convert node ({v}) " f"to type {nodetype}"
+                    ) from err
+            if edgetype is not None:
+                try:
+                    edgedata = {"weight": edgetype(data)}
+                except BaseException as err:
+                    raise TypeError(
+                        f"Failed to convert edge data ({data}) " f"to type {edgetype}"
+                    ) from err
+            else:
+                try:  # try to evaluate
+                    edgedata = literal_eval(data)
+                except:
+                    edgedata = {}
+            G.add_edge(u, v, **edgedata)
+
+    return G
+
+
+@open_file(0, mode="rb")
+@nx._dispatch(graphs=None)
+def read_multiline_adjlist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    edgetype=None,
+    encoding="utf-8",
+):
+    """Read graph in multi-line adjacency list format from path.
+
+    Parameters
+    ----------
+    path : string or file
+        Filename or file handle to read.
+        Filenames ending in .gz or .bz2 will be uncompressed.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    nodetype : Python type, optional
+        Convert nodes to this type.
+
+    edgetype : Python type, optional
+        Convert edge data to this type.
+
+    comments : string, optional
+        Marker for comment lines
+
+    delimiter : string, optional
+        Separator for node labels. The default is whitespace.
+
+    Returns
+    -------
+    G: NetworkX graph
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> nx.write_multiline_adjlist(G, "test.adjlist")
+    >>> G = nx.read_multiline_adjlist("test.adjlist")
+
+    The path can be a file or a string with the name of the file. If a
+    file is provided, it has to be opened in 'rb' mode.
+
+    >>> fh = open("test.adjlist", "rb")
+    >>> G = nx.read_multiline_adjlist(fh)
+
+    Filenames ending in .gz or .bz2 will be compressed.
+
+    >>> nx.write_multiline_adjlist(G, "test.adjlist.gz")
+    >>> G = nx.read_multiline_adjlist("test.adjlist.gz")
+
+    The optional nodetype is a function to convert node strings to nodetype.
+
+    For example
+
+    >>> G = nx.read_multiline_adjlist("test.adjlist", nodetype=int)
+
+    will attempt to convert all nodes to integer type.
+
+    The optional edgetype is a function to convert edge data strings to
+    edgetype.
+
+    >>> G = nx.read_multiline_adjlist("test.adjlist")
+
+    The optional create_using parameter is a NetworkX graph container.
+    The default is Graph(), an undirected graph. To read the data as
+    a directed graph use
+
+    >>> G = nx.read_multiline_adjlist("test.adjlist", create_using=nx.DiGraph)
+
+    Notes
+    -----
+    This format stores simple edge data, but not graph or node attributes.
+
+    See Also
+    --------
+    write_multiline_adjlist
+    """
+    lines = (line.decode(encoding) for line in path)
+    return parse_multiline_adjlist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        edgetype=edgetype,
+    )
diff --git a/phivenv/Lib/site-packages/networkx/readwrite/p2g.py b/phivenv/Lib/site-packages/networkx/readwrite/p2g.py
new file mode 100644
index 0000000000000000000000000000000000000000..84c72572e91912c8e81d168755e646728d02cf99
--- /dev/null
+++ b/phivenv/Lib/site-packages/networkx/readwrite/p2g.py
@@ -0,0 +1,104 @@
+"""
+This module provides read and write support for the p2g format
+used in metabolic pathway studies.
+
+See https://web.archive.org/web/20080626113807/http://www.cs.purdue.edu/homes/koyuturk/pathway/ for a description.
+
+The summary is included here:
+
+A file that describes a uniquely labeled graph (with extension ".gr")
+format looks like the following:
+
+
+name
+3 4
+a
+1 2
+b
+
+c
+0 2
+
+"name" is simply a description of what the graph corresponds to. The
+second line displays the number of nodes and number of edges,
+respectively. This sample graph contains three nodes labeled "a", "b",
+and "c". The rest of the graph contains two lines for each node. The
+first line for a node contains the node label. After the declaration
+of the node label, the out-edges of that node in the graph are
+provided. For instance, "a" is linked to nodes 1 and 2, which are
+labeled "b" and "c", while the node labeled "b" has no outgoing
+edges. Observe that the node labeled "c" has an outgoing edge to
+itself. Indeed, self-loops are allowed. Node index starts from 0.
+
+"""
+import networkx as nx
+from networkx.utils import open_file
+
+
+@open_file(1, mode="wb")
+def write_p2g(G, path, encoding="utf-8"):
+    """Write NetworkX graph in p2g format.
+
+    Notes
+    -----
+    This format is meant to be used with directed graphs with
+    possible self loops.
+    """
+    path.write((f"{G.name}\n").encode(encoding))
+    path.write((f"{G.order()} {G.size()}\n").encode(encoding))
+    nodes = list(G)
+    # make dictionary mapping nodes to integers
+    nodenumber = dict(zip(nodes, range(len(nodes))))
+    for n in nodes:
+        path.write((f"{n}\n").encode(encoding))
+        for nbr in G.neighbors(n):
+            path.write((f"{nodenumber[nbr]} ").encode(encoding))
+        path.write("\n".encode(encoding))
+
+
+@open_file(0, mode="rb")
+@nx._dispatch(graphs=None)
+def read_p2g(path, encoding="utf-8"):
+    """Read graph in p2g format from path.
+
+    Returns
+    -------
+    MultiDiGraph
+
+    Notes
+    -----
+    If you want a DiGraph (with no parallel edges and no edge data)
+    use D=nx.DiGraph(read_p2g(path))
+    """
+    lines = (line.decode(encoding) for line in path)
+    G = parse_p2g(lines)
+    return G
+
+
+@nx._dispatch(graphs=None)
+def parse_p2g(lines):
+    """Parse p2g format graph from string or iterable.
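+
+    Parameters
+    ----------
+    lines : string or iterable
+        Data in p2g format.
+
+    A minimal sketch, parsing the sample graph from the module docstring:
+
+    >>> from networkx.readwrite.p2g import parse_p2g
+    >>> lines = ["name", "3 4", "a", "1 2", "b", "", "c", "0 2"]
+    >>> G = parse_p2g(iter(lines))
+    >>> G.number_of_nodes(), G.number_of_edges()
+    (3, 4)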
+ + Returns + ------- + MultiDiGraph + """ + description = next(lines).strip() + # are multiedges (parallel edges) allowed? + G = nx.MultiDiGraph(name=description, selfloops=True) + nnodes, nedges = map(int, next(lines).split()) + nodelabel = {} + nbrs = {} + # loop over the nodes keeping track of node labels and out neighbors + # defer adding edges until all node labels are known + for i in range(nnodes): + n = next(lines).strip() + nodelabel[i] = n + G.add_node(n) + nbrs[n] = map(int, next(lines).split()) + # now we know all of the node labels so we can add the edges + # with the correct labels + for n in G: + for nbr in nbrs[n]: + G.add_edge(n, nodelabel[nbr]) + return G diff --git a/phivenv/Lib/site-packages/networkx/readwrite/pajek.py b/phivenv/Lib/site-packages/networkx/readwrite/pajek.py new file mode 100644 index 0000000000000000000000000000000000000000..0c6050f15078b5f50fe3f64cf3b247ed4d532d74 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/pajek.py @@ -0,0 +1,286 @@ +""" +***** +Pajek +***** +Read graphs in Pajek format. + +This implementation handles directed and undirected graphs including +those with self loops and parallel edges. + +Format +------ +See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm +for format information. + +""" + +import warnings + +import networkx as nx +from networkx.utils import open_file + +__all__ = ["read_pajek", "parse_pajek", "generate_pajek", "write_pajek"] + + +def generate_pajek(G): + """Generate lines in Pajek graph format. + + Parameters + ---------- + G : graph + A Networkx graph + + References + ---------- + See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm + for format information. + """ + if G.name == "": + name = "NetworkX" + else: + name = G.name + # Apparently many Pajek format readers can't process this line + # So we'll leave it out for now. + # yield '*network %s'%name + + # write nodes with attributes + yield f"*vertices {G.order()}" + nodes = list(G) + # make dictionary mapping nodes to integers + nodenumber = dict(zip(nodes, range(1, len(nodes) + 1))) + for n in nodes: + # copy node attributes and pop mandatory attributes + # to avoid duplication. + na = G.nodes.get(n, {}).copy() + x = na.pop("x", 0.0) + y = na.pop("y", 0.0) + try: + id = int(na.pop("id", nodenumber[n])) + except ValueError as err: + err.args += ( + ( + "Pajek format requires 'id' to be an int()." + " Refer to the 'Relabeling nodes' section." + ), + ) + raise + nodenumber[n] = id + shape = na.pop("shape", "ellipse") + s = " ".join(map(make_qstr, (id, n, x, y, shape))) + # only optional attributes are left in na. + for k, v in na.items(): + if isinstance(v, str) and v.strip() != "": + s += f" {make_qstr(k)} {make_qstr(v)}" + else: + warnings.warn( + f"Node attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}." + ) + yield s + + # write edges with attributes + if G.is_directed(): + yield "*arcs" + else: + yield "*edges" + for u, v, edgedata in G.edges(data=True): + d = edgedata.copy() + value = d.pop("weight", 1.0) # use 1 as default edge value + s = " ".join(map(make_qstr, (nodenumber[u], nodenumber[v], value))) + for k, v in d.items(): + if isinstance(v, str) and v.strip() != "": + s += f" {make_qstr(k)} {make_qstr(v)}" + else: + warnings.warn( + f"Edge attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}." 
+ ) + yield s + + +@open_file(1, mode="wb") +def write_pajek(G, path, encoding="UTF-8"): + """Write graph in Pajek format to path. + + Parameters + ---------- + G : graph + A Networkx graph + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be compressed. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_pajek(G, "test.net") + + Warnings + -------- + Optional node attributes and edge attributes must be non-empty strings. + Otherwise it will not be written into the file. You will need to + convert those attributes to strings if you want to keep them. + + References + ---------- + See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm + for format information. + """ + for line in generate_pajek(G): + line += "\n" + path.write(line.encode(encoding)) + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_pajek(path, encoding="UTF-8"): + """Read graph in Pajek format from path. + + Parameters + ---------- + path : file or string + File or filename to write. + Filenames ending in .gz or .bz2 will be uncompressed. + + Returns + ------- + G : NetworkX MultiGraph or MultiDiGraph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.write_pajek(G, "test.net") + >>> G = nx.read_pajek("test.net") + + To create a Graph instead of a MultiGraph use + + >>> G1 = nx.Graph(G) + + References + ---------- + See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm + for format information. + """ + lines = (line.decode(encoding) for line in path) + return parse_pajek(lines) + + +@nx._dispatch(graphs=None) +def parse_pajek(lines): + """Parse Pajek format graph from string or iterable. + + Parameters + ---------- + lines : string or iterable + Data in Pajek format. + + Returns + ------- + G : NetworkX graph + + See Also + -------- + read_pajek + + """ + import shlex + + # multigraph=False + if isinstance(lines, str): + lines = iter(lines.split("\n")) + lines = iter([line.rstrip("\n") for line in lines]) + G = nx.MultiDiGraph() # are multiedges allowed in Pajek? 
assume yes + labels = [] # in the order of the file, needed for matrix + while lines: + try: + l = next(lines) + except: # EOF + break + if l.lower().startswith("*network"): + try: + label, name = l.split(None, 1) + except ValueError: + # Line was not of the form: *network NAME + pass + else: + G.graph["name"] = name + elif l.lower().startswith("*vertices"): + nodelabels = {} + l, nnodes = l.split() + for i in range(int(nnodes)): + l = next(lines) + try: + splitline = [ + x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8")) + ] + except AttributeError: + splitline = shlex.split(str(l)) + id, label = splitline[0:2] + labels.append(label) + G.add_node(label) + nodelabels[id] = label + G.nodes[label]["id"] = id + try: + x, y, shape = splitline[2:5] + G.nodes[label].update( + {"x": float(x), "y": float(y), "shape": shape} + ) + except: + pass + extra_attr = zip(splitline[5::2], splitline[6::2]) + G.nodes[label].update(extra_attr) + elif l.lower().startswith("*edges") or l.lower().startswith("*arcs"): + if l.lower().startswith("*edge"): + # switch from multidigraph to multigraph + G = nx.MultiGraph(G) + if l.lower().startswith("*arcs"): + # switch to directed with multiple arcs for each existing edge + G = G.to_directed() + for l in lines: + try: + splitline = [ + x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8")) + ] + except AttributeError: + splitline = shlex.split(str(l)) + + if len(splitline) < 2: + continue + ui, vi = splitline[0:2] + u = nodelabels.get(ui, ui) + v = nodelabels.get(vi, vi) + # parse the data attached to this edge and put in a dictionary + edge_data = {} + try: + # there should always be a single value on the edge? + w = splitline[2:3] + edge_data.update({"weight": float(w[0])}) + except: + pass + # if there isn't, just assign a 1 + # edge_data.update({'value':1}) + extra_attr = zip(splitline[3::2], splitline[4::2]) + edge_data.update(extra_attr) + # if G.has_edge(u,v): + # multigraph=True + G.add_edge(u, v, **edge_data) + elif l.lower().startswith("*matrix"): + G = nx.DiGraph(G) + adj_list = ( + (labels[row], labels[col], {"weight": int(data)}) + for (row, line) in enumerate(lines) + for (col, data) in enumerate(line.split()) + if int(data) != 0 + ) + G.add_edges_from(adj_list) + + return G + + +def make_qstr(t): + """Returns the string representation of t. + Add outer double-quotes if the string has a space. + """ + if not isinstance(t, str): + t = str(t) + if " " in t: + t = f'"{t}"' + return t diff --git a/phivenv/Lib/site-packages/networkx/readwrite/sparse6.py b/phivenv/Lib/site-packages/networkx/readwrite/sparse6.py new file mode 100644 index 0000000000000000000000000000000000000000..7c43109ae7bd8b2ee65c27e3e2fa3b00a29d83f9 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/sparse6.py @@ -0,0 +1,376 @@ +# Original author: D. Eppstein, UC Irvine, August 12, 2003. +# The original code at https://www.ics.uci.edu/~eppstein/PADS/ is public domain. +"""Functions for reading and writing graphs in the *sparse6* format. + +The *sparse6* file format is a space-efficient format for large sparse +graphs. For small graphs or large dense graphs, use the *graph6* file +format. + +For more information, see the `sparse6`_ homepage. + +.. 
_sparse6: https://users.cecs.anu.edu.au/~bdm/data/formats.html + +""" +import networkx as nx +from networkx.exception import NetworkXError +from networkx.readwrite.graph6 import data_to_n, n_to_data +from networkx.utils import not_implemented_for, open_file + +__all__ = ["from_sparse6_bytes", "read_sparse6", "to_sparse6_bytes", "write_sparse6"] + + +def _generate_sparse6_bytes(G, nodes, header): + """Yield bytes in the sparse6 encoding of a graph. + + `G` is an undirected simple graph. `nodes` is the list of nodes for + which the node-induced subgraph will be encoded; if `nodes` is the + list of all nodes in the graph, the entire graph will be + encoded. `header` is a Boolean that specifies whether to generate + the header ``b'>>sparse6<<'`` before the remaining data. + + This function generates `bytes` objects in the following order: + + 1. the header (if requested), + 2. the encoding of the number of nodes, + 3. each character, one-at-a-time, in the encoding of the requested + node-induced subgraph, + 4. a newline character. + + This function raises :exc:`ValueError` if the graph is too large for + the graph6 format (that is, greater than ``2 ** 36`` nodes). + + """ + n = len(G) + if n >= 2**36: + raise ValueError( + "sparse6 is only defined if number of nodes is less " "than 2 ** 36" + ) + if header: + yield b">>sparse6<<" + yield b":" + for d in n_to_data(n): + yield str.encode(chr(d + 63)) + + k = 1 + while 1 << k < n: + k += 1 + + def enc(x): + """Big endian k-bit encoding of x""" + return [1 if (x & 1 << (k - 1 - i)) else 0 for i in range(k)] + + edges = sorted((max(u, v), min(u, v)) for u, v in G.edges()) + bits = [] + curv = 0 + for v, u in edges: + if v == curv: # current vertex edge + bits.append(0) + bits.extend(enc(u)) + elif v == curv + 1: # next vertex edge + curv += 1 + bits.append(1) + bits.extend(enc(u)) + else: # skip to vertex v and then add edge to u + curv = v + bits.append(1) + bits.extend(enc(v)) + bits.append(0) + bits.extend(enc(u)) + if k < 6 and n == (1 << k) and ((-len(bits)) % 6) >= k and curv < (n - 1): + # Padding special case: small k, n=2^k, + # more than k bits of padding needed, + # current vertex is not (n-1) -- + # appending 1111... would add a loop on (n-1) + bits.append(0) + bits.extend([1] * ((-len(bits)) % 6)) + else: + bits.extend([1] * ((-len(bits)) % 6)) + + data = [ + (bits[i + 0] << 5) + + (bits[i + 1] << 4) + + (bits[i + 2] << 3) + + (bits[i + 3] << 2) + + (bits[i + 4] << 1) + + (bits[i + 5] << 0) + for i in range(0, len(bits), 6) + ] + + for d in data: + yield str.encode(chr(d + 63)) + yield b"\n" + + +@nx._dispatch(graphs=None) +def from_sparse6_bytes(string): + """Read an undirected graph in sparse6 format from string. + + Parameters + ---------- + string : string + Data in sparse6 format + + Returns + ------- + G : Graph + + Raises + ------ + NetworkXError + If the string is unable to be parsed in sparse6 format + + Examples + -------- + >>> G = nx.from_sparse6_bytes(b":A_") + >>> sorted(G.edges()) + [(0, 1), (0, 1), (0, 1)] + + See Also + -------- + read_sparse6, write_sparse6 + + References + ---------- + .. 
[1] Sparse6 specification + + + """ + if string.startswith(b">>sparse6<<"): + string = string[11:] + if not string.startswith(b":"): + raise NetworkXError("Expected leading colon in sparse6") + + chars = [c - 63 for c in string[1:]] + n, data = data_to_n(chars) + k = 1 + while 1 << k < n: + k += 1 + + def parseData(): + """Returns stream of pairs b[i], x[i] for sparse6 format.""" + chunks = iter(data) + d = None # partial data word + dLen = 0 # how many unparsed bits are left in d + + while 1: + if dLen < 1: + try: + d = next(chunks) + except StopIteration: + return + dLen = 6 + dLen -= 1 + b = (d >> dLen) & 1 # grab top remaining bit + + x = d & ((1 << dLen) - 1) # partially built up value of x + xLen = dLen # how many bits included so far in x + while xLen < k: # now grab full chunks until we have enough + try: + d = next(chunks) + except StopIteration: + return + dLen = 6 + x = (x << 6) + d + xLen += 6 + x = x >> (xLen - k) # shift back the extra bits + dLen = xLen - k + yield b, x + + v = 0 + + G = nx.MultiGraph() + G.add_nodes_from(range(n)) + + multigraph = False + for b, x in parseData(): + if b == 1: + v += 1 + # padding with ones can cause overlarge number here + if x >= n or v >= n: + break + elif x > v: + v = x + else: + if G.has_edge(x, v): + multigraph = True + G.add_edge(x, v) + if not multigraph: + G = nx.Graph(G) + return G + + +def to_sparse6_bytes(G, nodes=None, header=True): + """Convert an undirected graph to bytes in sparse6 format. + + Parameters + ---------- + G : Graph (undirected) + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by ``G.nodes()`` is used. + + header: bool + If True add '>>sparse6<<' bytes to head of data. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed. + + ValueError + If the graph has at least ``2 ** 36`` nodes; the sparse6 format + is only defined for graphs of order less than ``2 ** 36``. + + Examples + -------- + >>> nx.to_sparse6_bytes(nx.path_graph(2)) + b'>>sparse6<<:An\\n' + + See Also + -------- + to_sparse6_bytes, read_sparse6, write_sparse6_bytes + + Notes + ----- + The returned bytes end with a newline character. + + The format does not support edge or node labels. + + References + ---------- + .. [1] Graph6 specification + + + """ + if nodes is not None: + G = G.subgraph(nodes) + G = nx.convert_node_labels_to_integers(G, ordering="sorted") + return b"".join(_generate_sparse6_bytes(G, nodes, header)) + + +@open_file(0, mode="rb") +@nx._dispatch(graphs=None) +def read_sparse6(path): + """Read an undirected graph in sparse6 format from path. + + Parameters + ---------- + path : file or string + File or filename to write. + + Returns + ------- + G : Graph/Multigraph or list of Graphs/MultiGraphs + If the file contains multiple lines then a list of graphs is returned + + Raises + ------ + NetworkXError + If the string is unable to be parsed in sparse6 format + + Examples + -------- + You can read a sparse6 file by giving the path to the file:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile(delete=False) as f: + ... _ = f.write(b">>sparse6<<:An\\n") + ... _ = f.seek(0) + ... G = nx.read_sparse6(f.name) + >>> list(G.edges()) + [(0, 1)] + + You can also read a sparse6 file by giving an open file-like object:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile() as f: + ... _ = f.write(b">>sparse6<<:An\\n") + ... _ = f.seek(0) + ... 
G = nx.read_sparse6(f) + >>> list(G.edges()) + [(0, 1)] + + See Also + -------- + read_sparse6, from_sparse6_bytes + + References + ---------- + .. [1] Sparse6 specification + + + """ + glist = [] + for line in path: + line = line.strip() + if not len(line): + continue + glist.append(from_sparse6_bytes(line)) + if len(glist) == 1: + return glist[0] + else: + return glist + + +@not_implemented_for("directed") +@open_file(1, mode="wb") +def write_sparse6(G, path, nodes=None, header=True): + """Write graph G to given path in sparse6 format. + + Parameters + ---------- + G : Graph (undirected) + + path : file or string + File or filename to write + + nodes: list or iterable + Nodes are labeled 0...n-1 in the order provided. If None the ordering + given by G.nodes() is used. + + header: bool + If True add '>>sparse6<<' string to head of data + + Raises + ------ + NetworkXError + If the graph is directed + + Examples + -------- + You can write a sparse6 file by giving the path to the file:: + + >>> import tempfile + >>> with tempfile.NamedTemporaryFile(delete=False) as f: + ... nx.write_sparse6(nx.path_graph(2), f.name) + ... print(f.read()) + b'>>sparse6<<:An\\n' + + You can also write a sparse6 file by giving an open file-like object:: + + >>> with tempfile.NamedTemporaryFile() as f: + ... nx.write_sparse6(nx.path_graph(2), f) + ... _ = f.seek(0) + ... print(f.read()) + b'>>sparse6<<:An\\n' + + See Also + -------- + read_sparse6, from_sparse6_bytes + + Notes + ----- + The format does not support edge or node labels. + + References + ---------- + .. [1] Sparse6 specification + + + """ + if nodes is not None: + G = G.subgraph(nodes) + G = nx.convert_node_labels_to_integers(G, ordering="sorted") + for b in _generate_sparse6_bytes(G, nodes, header): + path.write(b) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__init__.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d036c33935be54786f07feca76e8d96a2b5ba740 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12d88fcf093bf5dad02c7d08575d0331b87bb032 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_adjlist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..597e877dc0588c727172f5c4ec39d54c3607ce68 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_edgelist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9f4ca645e52e2ab9ff01fcc2ef8fe8af5666106a Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_gexf.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b93dd68f13e0018e9ee9915178e605000ac84d Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_gml.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a18f77dbeb3b4501ab63c4f2964767ba650f2fb Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_graph6.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97f3a33890247e750cf9a382664fa41ebbd029f7 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_graphml.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93040d5d9f38ab7d9603109607383c145f68e060 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_leda.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ab70cbec43dd21a6e03066524badf8a098b4301 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_p2g.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8bd820ad50bdce6f8a3a7453b7beb275d801b87 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_pajek.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cb7eb4d2a17edb368d365c019716bcccc86bd56 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_sparse6.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11f192ef94394c1aa893549bb240e2d21094d953 Binary files /dev/null and 
b/phivenv/Lib/site-packages/networkx/readwrite/tests/__pycache__/test_text.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_adjlist.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_adjlist.py new file mode 100644 index 0000000000000000000000000000000000000000..aecfb87e94057ef5ff1e81639afb8bce1e89ddff --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_adjlist.py @@ -0,0 +1,268 @@ +""" + Unit tests for adjlist. +""" +import io +import os +import tempfile + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestAdjlist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_read_multiline_adjlist_1(self): + # Unit test for https://networkx.lanl.gov/trac/ticket/252 + s = b"""# comment line +1 2 +# comment line +2 +3 +""" + bytesIO = io.BytesIO(s) + G = nx.read_multiline_adjlist(bytesIO) + adj = {"1": {"3": {}, "2": {}}, "3": {"1": {}}, "2": {"1": {}}} + assert graphs_equal(G, nx.Graph(adj)) + + def test_unicode(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fd, fname = tempfile.mkstemp() + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname) + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_latin1_err(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fd, fname = tempfile.mkstemp() + pytest.raises( + UnicodeEncodeError, nx.write_multiline_adjlist, G, fname, encoding="latin-1" + ) + os.close(fd) + os.unlink(fname) + + def test_latin1(self): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + fd, fname = tempfile.mkstemp() + nx.write_multiline_adjlist(G, fname, encoding="latin-1") + H = nx.read_multiline_adjlist(fname, encoding="latin-1") + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_parse_adjlist(self): + lines = ["1 2 5", "2 3 4", "3 5", "4", "5"] + nx.parse_adjlist(lines, nodetype=int) # smoke test + with pytest.raises(TypeError): + nx.parse_adjlist(lines, nodetype="int") + lines = ["1 2 5", "2 b", "c"] + with pytest.raises(TypeError): + nx.parse_adjlist(lines, nodetype=int) + + def test_adjlist_graph(self): + G = self.G + (fd, fname) = tempfile.mkstemp() + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname) + H2 = nx.read_adjlist(fname) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_adjlist_digraph(self): + G = self.DG + (fd, fname) = tempfile.mkstemp() + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, create_using=nx.DiGraph()) + H2 = nx.read_adjlist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_adjlist_integers(self): + 
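+        # Round-trip: relabel nodes to integers, write, then read back with nodetype=int.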
(fd, fname) = tempfile.mkstemp() + G = nx.convert_node_labels_to_integers(self.G) + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int) + H2 = nx.read_adjlist(fname, nodetype=int) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_adjlist_multigraph(self): + G = self.XG + (fd, fname) = tempfile.mkstemp() + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_adjlist_multidigraph(self): + G = self.XDG + (fd, fname) = tempfile.mkstemp() + nx.write_adjlist(G, fname) + H = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + H2 = nx.read_adjlist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_adjlist_delimiter(self): + fh = io.BytesIO() + G = nx.path_graph(3) + nx.write_adjlist(G, fh, delimiter=":") + fh.seek(0) + H = nx.read_adjlist(fh, nodetype=int, delimiter=":") + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + + +class TestMultilineAdjlist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.DG.remove_edge("b", "a") + cls.DG.remove_edge("b", "c") + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_parse_multiline_adjlist(self): + lines = [ + "1 2", + "b {'weight':3, 'name': 'Frodo'}", + "c {}", + "d 1", + "e {'weight':6, 'name': 'Saruman'}", + ] + nx.parse_multiline_adjlist(iter(lines)) # smoke test + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + nx.parse_multiline_adjlist(iter(lines), edgetype=str) # smoke test + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + lines = ["1 a"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + lines = ["a 2"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines), nodetype=int) + lines = ["1 2"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + lines = ["1 2", "2 {}"] + with pytest.raises(TypeError): + nx.parse_multiline_adjlist(iter(lines)) + + def test_multiline_adjlist_graph(self): + G = self.G + (fd, fname) = tempfile.mkstemp() + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname) + H2 = nx.read_multiline_adjlist(fname) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_multiline_adjlist_digraph(self): + G = self.DG + (fd, fname) = tempfile.mkstemp() + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, create_using=nx.DiGraph()) + H2 = nx.read_multiline_adjlist(fname, create_using=nx.DiGraph()) + 
assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_multiline_adjlist_integers(self): + (fd, fname) = tempfile.mkstemp() + G = nx.convert_node_labels_to_integers(self.G) + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, nodetype=int) + H2 = nx.read_multiline_adjlist(fname, nodetype=int) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_multiline_adjlist_multigraph(self): + G = self.XG + (fd, fname) = tempfile.mkstemp() + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiGraph() + ) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_multiline_adjlist_multidigraph(self): + G = self.XDG + (fd, fname) = tempfile.mkstemp() + nx.write_multiline_adjlist(G, fname) + H = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiDiGraph() + ) + H2 = nx.read_multiline_adjlist( + fname, nodetype=int, create_using=nx.MultiDiGraph() + ) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_multiline_adjlist_delimiter(self): + fh = io.BytesIO() + G = nx.path_graph(3) + nx.write_multiline_adjlist(G, fh, delimiter=":") + fh.seek(0) + H = nx.read_multiline_adjlist(fh, nodetype=int, delimiter=":") + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_edgelist.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..18b726f43802c7e061e12991e343f517ed506c2c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_edgelist.py @@ -0,0 +1,314 @@ +""" + Unit tests for edgelists. 
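+    Covers parsing, reading, and writing edge lists with and without edge data.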
+""" +import io +import os +import tempfile +import textwrap + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + +edges_no_data = textwrap.dedent( + """ + # comment line + 1 2 + # comment line + 2 3 + """ +) + + +edges_with_values = textwrap.dedent( + """ + # comment line + 1 2 2.0 + # comment line + 2 3 3.0 + """ +) + + +edges_with_weight = textwrap.dedent( + """ + # comment line + 1 2 {'weight':2.0} + # comment line + 2 3 {'weight':3.0} + """ +) + + +edges_with_multiple_attrs = textwrap.dedent( + """ + # comment line + 1 2 {'weight':2.0, 'color':'green'} + # comment line + 2 3 {'weight':3.0, 'color':'red'} + """ +) + + +edges_with_multiple_attrs_csv = textwrap.dedent( + """ + # comment line + 1, 2, {'weight':2.0, 'color':'green'} + # comment line + 2, 3, {'weight':3.0, 'color':'red'} + """ +) + + +_expected_edges_weights = [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] +_expected_edges_multiattr = [ + (1, 2, {"weight": 2.0, "color": "green"}), + (2, 3, {"weight": 3.0, "color": "red"}), +] + + +@pytest.mark.parametrize( + ("data", "extra_kwargs"), + ( + (edges_no_data, {}), + (edges_with_values, {}), + (edges_with_weight, {}), + (edges_with_multiple_attrs, {}), + (edges_with_multiple_attrs_csv, {"delimiter": ","}), + ), +) +def test_read_edgelist_no_data(data, extra_kwargs): + bytesIO = io.BytesIO(data.encode("utf-8")) + G = nx.read_edgelist(bytesIO, nodetype=int, data=False, **extra_kwargs) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + +def test_read_weighted_edgelist(): + bytesIO = io.BytesIO(edges_with_values.encode("utf-8")) + G = nx.read_weighted_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(data=True), _expected_edges_weights) + + +@pytest.mark.parametrize( + ("data", "extra_kwargs", "expected"), + ( + (edges_with_weight, {}, _expected_edges_weights), + (edges_with_multiple_attrs, {}, _expected_edges_multiattr), + (edges_with_multiple_attrs_csv, {"delimiter": ","}, _expected_edges_multiattr), + ), +) +def test_read_edgelist_with_data(data, extra_kwargs, expected): + bytesIO = io.BytesIO(data.encode("utf-8")) + G = nx.read_edgelist(bytesIO, nodetype=int, **extra_kwargs) + assert edges_equal(G.edges(data=True), expected) + + +@pytest.fixture +def example_graph(): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 3.0), (2, 3, 27.0), (3, 4, 3.0)]) + return G + + +def test_parse_edgelist_no_data(example_graph): + G = example_graph + H = nx.parse_edgelist(["1 2", "2 3", "3 4"], nodetype=int) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges, H.edges) + + +def test_parse_edgelist_with_data_dict(example_graph): + G = example_graph + H = nx.parse_edgelist( + ["1 2 {'weight': 3}", "2 3 {'weight': 27}", "3 4 {'weight': 3.0}"], nodetype=int + ) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + +def test_parse_edgelist_with_data_list(example_graph): + G = example_graph + H = nx.parse_edgelist( + ["1 2 3", "2 3 27", "3 4 3.0"], nodetype=int, data=(("weight", float),) + ) + assert nodes_equal(G.nodes, H.nodes) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + +def test_parse_edgelist(): + # ignore lines with less than 2 nodes + lines = ["1;2", "2 3", "3 4"] + G = nx.parse_edgelist(lines, nodetype=int) + assert list(G.edges()) == [(2, 3), (3, 4)] + # unknown nodetype + with pytest.raises(TypeError, match="Failed to convert nodes"): + lines = ["1 2", "2 3", "3 4"] + nx.parse_edgelist(lines, nodetype="nope") + # lines have 
invalid edge format + with pytest.raises(TypeError, match="Failed to convert edge data"): + lines = ["1 2 3", "2 3", "3 4"] + nx.parse_edgelist(lines, nodetype=int) + # edge data and data_keys not the same length + with pytest.raises(IndexError, match="not the same length"): + lines = ["1 2 3", "2 3 27", "3 4 3.0"] + nx.parse_edgelist( + lines, nodetype=int, data=(("weight", float), ("capacity", int)) + ) + # edge data can't be converted to edge type + with pytest.raises(TypeError, match="Failed to convert"): + lines = ["1 2 't1'", "2 3 't3'", "3 4 't3'"] + nx.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + + +def test_comments_None(): + edgelist = ["node#1 node#2", "node#2 node#3"] + # comments=None supported to ignore all comment characters + G = nx.parse_edgelist(edgelist, comments=None) + H = nx.Graph([e.split(" ") for e in edgelist]) + assert edges_equal(G.edges, H.edges) + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + cls.XG = nx.MultiGraph() + cls.XG.add_weighted_edges_from([(1, 2, 5), (1, 2, 5), (1, 2, 1), (3, 3, 42)]) + cls.XDG = nx.MultiDiGraph(cls.XG) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + nx.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n2 3\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + nx.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n2 3 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + nx.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n2 3 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + nx.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n2 3 3.0\n" + + def test_unicode(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fd, fname = tempfile.mkstemp() + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname) + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_latin1_issue(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + fd, fname = tempfile.mkstemp() + pytest.raises( + UnicodeEncodeError, nx.write_edgelist, G, fname, encoding="latin-1" + ) + os.close(fd) + os.unlink(fname) + + def test_latin1(self): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + fd, fname = tempfile.mkstemp() + nx.write_edgelist(G, fname, encoding="latin-1") + H = nx.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_edgelist_graph(self): + G = self.G + (fd, fname) = tempfile.mkstemp() + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname) + H2 = nx.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + 
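(Editor's aside; not part of the vendored test file.) A minimal sketch of the data keyword exercised by test_write_edgelist_4 above: passing a list of attribute names writes only those values after each edge pair, assuming just the public write_edgelist API:

import io

import networkx as nx

G = nx.Graph()
G.add_edge(1, 2, weight=2.0, color="green")
buf = io.BytesIO()
nx.write_edgelist(G, buf, data=["weight"])  # only the weight value is serialized
buf.seek(0)
assert buf.read() == b"1 2 2.0\n"  # the color attribute is omitted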
assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_digraph(self): + G = self.DG + (fd, fname) = tempfile.mkstemp() + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, create_using=nx.DiGraph()) + H2 = nx.read_edgelist(fname, create_using=nx.DiGraph()) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_integers(self): + G = nx.convert_node_labels_to_integers(self.G) + (fd, fname) = tempfile.mkstemp() + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_multigraph(self): + G = self.XG + (fd, fname) = tempfile.mkstemp() + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_multidigraph(self): + G = self.XDG + (fd, fname) = tempfile.mkstemp() + nx.write_edgelist(G, fname) + H = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + H2 = nx.read_edgelist(fname, nodetype=int, create_using=nx.MultiDiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_gexf.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_gexf.py new file mode 100644 index 0000000000000000000000000000000000000000..6ff14c99b1d5df41003b705b840a0968e0439239 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_gexf.py @@ -0,0 +1,557 @@ +import io +import time + +import pytest + +import networkx as nx + + +class TestGEXF: + @classmethod + def setup_class(cls): + cls.simple_directed_data = """ + + + + + + + + + + + +""" + cls.simple_directed_graph = nx.DiGraph() + cls.simple_directed_graph.add_node("0", label="Hello") + cls.simple_directed_graph.add_node("1", label="World") + cls.simple_directed_graph.add_edge("0", "1", id="0") + + cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) + + cls.attribute_data = """\ + + + Gephi.org + A Web network + + + + + + + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + cls.attribute_graph = nx.DiGraph() + cls.attribute_graph.graph["node_default"] = {"frog": True} + cls.attribute_graph.add_node( + "0", label="Gephi", url="https://gephi.org", indegree=1, frog=False + ) + cls.attribute_graph.add_node( + "1", label="Webatlas", url="http://webatlas.fr", indegree=2, frog=False + ) + cls.attribute_graph.add_node( + "2", label="RTGI", url="http://rtgi.fr", indegree=1, frog=True + ) + cls.attribute_graph.add_node( + "3", + label="BarabasiLab", + url="http://barabasilab.com", + indegree=1, + frog=True, + ) + cls.attribute_graph.add_edge("0", "1", id="0", label="foo") + cls.attribute_graph.add_edge("0", "2", id="1") + 
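(Editor's aside; not part of the vendored test file.) The canned GEXF fixtures set up above feed read_gexf; a minimal write/read round-trip sketch of the same API, assuming only public networkx calls:

import io

import networkx as nx

G = nx.DiGraph()
G.add_node(0, label="Hello")
G.add_node(1, label="World")
G.add_edge(0, 1)
buf = io.BytesIO()
nx.write_gexf(G, buf)  # serialize to GEXF XML
buf.seek(0)
H = nx.read_gexf(buf, node_type=int)  # node ids come back as str unless node_type is given
assert sorted(H.edges()) == sorted(G.edges())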
cls.attribute_graph.add_edge("1", "0", id="2") + cls.attribute_graph.add_edge("2", "1", id="3") + cls.attribute_graph.add_edge("0", "3", id="4") + cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) + + cls.simple_undirected_data = """ + + + + + + + + + + + +""" + cls.simple_undirected_graph = nx.Graph() + cls.simple_undirected_graph.add_node("0", label="Hello") + cls.simple_undirected_graph.add_node("1", label="World") + cls.simple_undirected_graph.add_edge("0", "1", id="0") + + cls.simple_undirected_fh = io.BytesIO( + cls.simple_undirected_data.encode("UTF-8") + ) + + def test_read_simple_directed_graphml(self): + G = self.simple_directed_graph + H = nx.read_gexf(self.simple_directed_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_read_simple_undirected_graphml(self): + G = self.simple_undirected_graph + H = nx.read_gexf(self.simple_undirected_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + self.simple_undirected_fh.seek(0) + + def test_read_attribute_graphml(self): + G = self.attribute_graph + H = nx.read_gexf(self.attribute_fh) + assert sorted(G.nodes(True)) == sorted(H.nodes(data=True)) + ge = sorted(G.edges(data=True)) + he = sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + self.attribute_fh.seek(0) + + def test_directed_edge_in_undirected(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_undirected_edge_in_directed(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_key_raises(self): + s = """ + + + + + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_gexf, fh) + + def test_relabel(self): + s = """ + + + + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_gexf(fh, relabel=True) + assert sorted(G.nodes()) == ["Hello", "Word"] + + def test_default_attribute(self): + G = nx.Graph() + G.add_node(1, label="1", color="green") + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, foo=3) + G.graph["node_default"] = {"color": "yellow"} + G.graph["edge_default"] = {"foo": 7} + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + # Reading a gexf graph always sets mode attribute to either + # 'static' or 'dynamic'. Remove the mode attribute from the + # read graph for the sake of comparing remaining attributes. 
+ del H.graph["mode"] + assert G.graph == H.graph + + def test_serialize_ints_to_strings(self): + G = nx.Graph() + G.add_node(1, id=7, label=77) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert list(H) == [7] + assert H.nodes[7]["label"] == "77" + + def test_write_with_node_attributes(self): + # Addresses #673. + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3)]) + for i in range(4): + G.nodes[i]["id"] = i + G.nodes[i]["label"] = i + G.nodes[i]["pid"] = i + G.nodes[i]["start"] = i + G.nodes[i]["end"] = i + 1 + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + +""" + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_edge_id_construct(self): + G = nx.Graph() + G.add_edges_from([(0, 1, {"id": 0}), (1, 2, {"id": 2}), (2, 3)]) + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + +""" + + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_numpy_type(self): + np = pytest.importorskip("numpy") + G = nx.path_graph(4) + nx.set_node_attributes(G, {n: n for n in np.arange(4)}, "number") + G[0][1]["edge-number"] = np.float64(1.1) + + expected = f""" + + NetworkX {nx.__version__} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + obtained = "\n".join(nx.generate_gexf(G)) + assert expected == obtained + + def test_bool(self): + G = nx.Graph() + G.add_node(1, testattr=True) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[1]["testattr"] + + # Test for NaN, INF and -INF + def test_specials(self): + from math import isnan + + inf, nan = float("inf"), float("nan") + G = nx.Graph() + G.add_node(1, testattr=inf, strdata="inf", key="a") + G.add_node(2, testattr=nan, strdata="nan", key="b") + G.add_node(3, testattr=-inf, strdata="-inf", key="c") + + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + filetext = fh.read() + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + + assert b"INF" in filetext + assert b"NaN" in filetext + assert b"-INF" in filetext + + assert H.nodes[1]["testattr"] == inf + assert isnan(H.nodes[2]["testattr"]) + assert H.nodes[3]["testattr"] == -inf + + assert H.nodes[1]["strdata"] == "inf" + assert H.nodes[2]["strdata"] == "nan" + assert H.nodes[3]["strdata"] == "-inf" + + assert H.nodes[1]["networkx_key"] == "a" + assert H.nodes[2]["networkx_key"] == "b" + assert H.nodes[3]["networkx_key"] == "c" + + def test_simple_list(self): + G = nx.Graph() + list_value = [(1, 2, 3), (9, 1, 2)] + G.add_node(1, key=list_value) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[1]["networkx_key"] == list_value + + def test_dynamic_mode(self): + G = nx.Graph() + G.add_node(1, label="1", color="green") + G.graph["mode"] = "dynamic" + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_multigraph_with_missing_attributes(self): + G = nx.MultiGraph() + G.add_node(0, label="1", color="green") + G.add_node(1, label="2", color="green") + G.add_edge(0, 1, id="0", weight=3, type="undirected", start=0, end=1) + G.add_edge(0, 1, id="1", label="foo", start=0, end=1) + G.add_edge(0, 1) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert 
sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_missing_viz_attributes(self): + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["viz"] = {"size": 54} + G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0} + G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256} + G.nodes[0]["viz"]["shape"] = "http://random.url" + G.nodes[0]["viz"]["thickness"] = 2 + fh = io.BytesIO() + nx.write_gexf(G, fh, version="1.1draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + # Test missing alpha value for version >draft1.1 - set default alpha value + # to 1.0 instead of `None` when writing for better general compatibility + fh = io.BytesIO() + # G.nodes[0]["viz"]["color"] does not have an alpha value explicitly defined + # so the default is used instead + nx.write_gexf(G, fh, version="1.2draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert H.nodes[0]["viz"]["color"]["a"] == 1.0 + + # Second graph for the other branch + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["viz"] = {"size": 54} + G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1, "z": 0} + G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256, "a": 0.5} + G.nodes[0]["viz"]["shape"] = "ftp://random.url" + G.nodes[0]["viz"]["thickness"] = 2 + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_slice_and_spell(self): + # Test spell first, so version = 1.2 + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["spells"] = [(1, 2)] + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + G = nx.Graph() + G.add_node(0, label="1", color="green") + G.nodes[0]["slices"] = [(1, 2)] + fh = io.BytesIO() + nx.write_gexf(G, fh, version="1.1draft") + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) + + def test_add_parent(self): + G = nx.Graph() + G.add_node(0, label="1", color="green", parents=[1, 2]) + fh = io.BytesIO() + nx.write_gexf(G, fh) + fh.seek(0) + H = nx.read_gexf(fh, node_type=int) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(sorted(e) for e in G.edges()) == sorted( + sorted(e) for e in H.edges() + ) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_gml.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_gml.py new file mode 100644 index 0000000000000000000000000000000000000000..869b21f6bb2cc0be5817a2529e137b45bd38c11e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_gml.py @@ -0,0 +1,753 @@ +import codecs +import io +import math +import os +import tempfile +from ast import literal_eval +from contextlib import contextmanager +from textwrap import dedent + +import pytest + +import networkx as nx +from networkx.readwrite.gml import literal_destringizer, literal_stringizer + + +class TestGraph: + @classmethod + def setup_class(cls): + cls.simple_data = 
"""Creator "me" +Version "xx" +graph [ + comment "This is a sample graph" + directed 1 + IsPlanar 1 + pos [ x 0 y 1 ] + node [ + id 1 + label "Node 1" + pos [ x 1 y 1 ] + ] + node [ + id 2 + pos [ x 1 y 2 ] + label "Node 2" + ] + node [ + id 3 + label "Node 3" + pos [ x 1 y 3 ] + ] + edge [ + source 1 + target 2 + label "Edge from node 1 to node 2" + color [line "blue" thickness 3] + + ] + edge [ + source 2 + target 3 + label "Edge from node 2 to node 3" + ] + edge [ + source 3 + target 1 + label "Edge from node 3 to node 1" + ] +] +""" + + def test_parse_gml_cytoscape_bug(self): + # example from issue #321, originally #324 in trac + cytoscape_example = """ +Creator "Cytoscape" +Version 1.0 +graph [ + node [ + root_index -3 + id -3 + graphics [ + x -96.0 + y -67.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node2" + ] + node [ + root_index -2 + id -2 + graphics [ + x 63.0 + y 37.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node1" + ] + node [ + root_index -1 + id -1 + graphics [ + x -31.0 + y -17.0 + w 40.0 + h 40.0 + fill "#ff9999" + type "ellipse" + outline "#666666" + outline_width 1.5 + ] + label "node0" + ] + edge [ + root_index -2 + target -2 + source -1 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] + edge [ + root_index -1 + target -1 + source -3 + graphics [ + width 1.5 + fill "#0000ff" + type "line" + Line [ + ] + source_arrow 0 + target_arrow 3 + ] + label "DirectedEdge" + ] +] +""" + nx.parse_gml(cytoscape_example) + + def test_parse_gml(self): + G = nx.parse_gml(self.simple_data, label="label") + assert sorted(G.nodes()) == ["Node 1", "Node 2", "Node 3"] + assert sorted(G.edges()) == [ + ("Node 1", "Node 2"), + ("Node 2", "Node 3"), + ("Node 3", "Node 1"), + ] + + assert sorted(G.edges(data=True)) == [ + ( + "Node 1", + "Node 2", + { + "color": {"line": "blue", "thickness": 3}, + "label": "Edge from node 1 to node 2", + }, + ), + ("Node 2", "Node 3", {"label": "Edge from node 2 to node 3"}), + ("Node 3", "Node 1", {"label": "Edge from node 3 to node 1"}), + ] + + def test_read_gml(self): + (fd, fname) = tempfile.mkstemp() + fh = open(fname, "w") + fh.write(self.simple_data) + fh.close() + Gin = nx.read_gml(fname, label="label") + G = nx.parse_gml(self.simple_data, label="label") + assert sorted(G.nodes(data=True)) == sorted(Gin.nodes(data=True)) + assert sorted(G.edges(data=True)) == sorted(Gin.edges(data=True)) + os.close(fd) + os.unlink(fname) + + def test_labels_are_strings(self): + # GML requires labels to be strings (i.e., in quotes) + answer = """graph [ + node [ + id 0 + label "1203" + ] +]""" + G = nx.Graph() + G.add_node(1203) + data = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer)) + assert data == answer + + def test_relabel_duplicate(self): + data = """ +graph +[ + label "" + directed 1 + node + [ + id 0 + label "same" + ] + node + [ + id 1 + label "same" + ] +] +""" + fh = io.BytesIO(data.encode("UTF-8")) + fh.seek(0) + pytest.raises(nx.NetworkXError, nx.read_gml, fh, label="label") + + @pytest.mark.parametrize("stringizer", (None, literal_stringizer)) + def test_tuplelabels(self, stringizer): + # https://github.com/networkx/networkx/pull/1048 + # Writing tuple labels to GML failed. 
+ G = nx.Graph() + G.add_edge((0, 1), (1, 0)) + data = "\n".join(nx.generate_gml(G, stringizer=stringizer)) + answer = """graph [ + node [ + id 0 + label "(0,1)" + ] + node [ + id 1 + label "(1,0)" + ] + edge [ + source 0 + target 1 + ] +]""" + assert data == answer + + def test_quotes(self): + # https://github.com/networkx/networkx/issues/1061 + # Encoding quotes as HTML entities. + G = nx.path_graph(1) + G.name = "path_graph(1)" + attr = 'This is "quoted" and this is a copyright: ' + chr(169) + G.nodes[0]["demo"] = attr + fobj = tempfile.NamedTemporaryFile() + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + name "path_graph(1)" + node [ + id 0 + label "0" + demo "This is &#34;quoted&#34; and this is a copyright: &#169;" + ] +]""" + assert data == answer + + def test_unicode_node(self): + node = "node" + chr(169) + G = nx.Graph() + G.add_node(node) + fobj = tempfile.NamedTemporaryFile() + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "node&#169;" + ] +]""" + assert data == answer + + def test_float_label(self): + node = 1.0 + G = nx.Graph() + G.add_node(node) + fobj = tempfile.NamedTemporaryFile() + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "1.0" + ] +]""" + assert data == answer + + def test_special_float_label(self): + special_floats = [float("nan"), float("+inf"), float("-inf")] + try: + import numpy as np + + special_floats += [np.nan, np.inf, np.inf * -1] + except ImportError: + special_floats += special_floats + + G = nx.cycle_graph(len(special_floats)) + attrs = dict(enumerate(special_floats)) + nx.set_node_attributes(G, attrs, "nodefloat") + edges = list(G.edges) + attrs = {edges[i]: value for i, value in enumerate(special_floats)} + nx.set_edge_attributes(G, attrs, "edgefloat") + + fobj = tempfile.NamedTemporaryFile() + nx.write_gml(G, fobj) + fobj.seek(0) + # Should be bytes in 2.x and 3.x + data = fobj.read().strip().decode("ascii") + answer = """graph [ + node [ + id 0 + label "0" + nodefloat NAN + ] + node [ + id 1 + label "1" + nodefloat +INF + ] + node [ + id 2 + label "2" + nodefloat -INF + ] + node [ + id 3 + label "3" + nodefloat NAN + ] + node [ + id 4 + label "4" + nodefloat +INF + ] + node [ + id 5 + label "5" + nodefloat -INF + ] + edge [ + source 0 + target 1 + edgefloat NAN + ] + edge [ + source 0 + target 5 + edgefloat +INF + ] + edge [ + source 1 + target 2 + edgefloat -INF + ] + edge [ + source 2 + target 3 + edgefloat NAN + ] + edge [ + source 3 + target 4 + edgefloat +INF + ] + edge [ + source 4 + target 5 + edgefloat -INF + ] +]""" + assert data == answer + + fobj.seek(0) + graph = nx.read_gml(fobj) + for indx, value in enumerate(special_floats): + node_value = graph.nodes[str(indx)]["nodefloat"] + if math.isnan(value): + assert math.isnan(node_value) + else: + assert node_value == value + + edge = edges[indx] + string_edge = (str(edge[0]), str(edge[1])) + edge_value = graph.edges[string_edge]["edgefloat"] + if math.isnan(value): + assert math.isnan(edge_value) + else: + assert edge_value == value + + def test_name(self): + G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]') + assert "x" == G.graph["name"] + G = nx.parse_gml('graph [ node [ id 0 label "x" ] ]') + assert "" == G.name + assert "name" not in G.graph + + def 
test_graph_types(self): + for directed in [None, False, True]: + for multigraph in [None, False, True]: + gml = "graph [" + if directed is not None: + gml += " directed " + str(int(directed)) + if multigraph is not None: + gml += " multigraph " + str(int(multigraph)) + gml += ' node [ id 0 label "0" ]' + gml += " edge [ source 0 target 0 ]" + gml += " ]" + G = nx.parse_gml(gml) + assert bool(directed) == G.is_directed() + assert bool(multigraph) == G.is_multigraph() + gml = "graph [\n" + if directed is True: + gml += " directed 1\n" + if multigraph is True: + gml += " multigraph 1\n" + gml += """ node [ + id 0 + label "0" + ] + edge [ + source 0 + target 0 +""" + if multigraph: + gml += " key 0\n" + gml += " ]\n]" + assert gml == "\n".join(nx.generate_gml(G)) + + def test_data_types(self): + data = [ + True, + False, + 10**20, + -2e33, + "'", + '"&&amp;&&#34;"', + [{(b"\xfd",): "\x7f", chr(0x4444): (1, 2)}, (2, "3")], + ] + data.append(chr(0x14444)) + data.append(literal_eval("{2.3j, 1 - 2.3j, ()}")) + G = nx.Graph() + G.name = data + G.graph["data"] = data + G.add_node(0, int=-1, data={"data": data}) + G.add_edge(0, 0, float=-2.5, data=data) + gml = "\n".join(nx.generate_gml(G, stringizer=literal_stringizer)) + G = nx.parse_gml(gml, destringizer=literal_destringizer) + assert data == G.name + assert {"name": data, "data": data} == G.graph + assert list(G.nodes(data=True)) == [(0, {"int": -1, "data": {"data": data}})] + assert list(G.edges(data=True)) == [(0, 0, {"float": -2.5, "data": data})] + G = nx.Graph() + G.graph["data"] = "frozenset([1, 2, 3])" + G = nx.parse_gml(nx.generate_gml(G), destringizer=literal_eval) + assert G.graph["data"] == "frozenset([1, 2, 3])" + + def test_escape_unescape(self): + gml = """graph [ + name "&amp;&#34;&#xf;&#x4444;&#1234567890;&#x1234567890abcdef;&unknown;" +]""" + G = nx.parse_gml(gml) + assert ( + '&"\x0f' + chr(0x4444) + "&#1234567890;&#x1234567890abcdef;&unknown;" + == G.name + ) + gml = "\n".join(nx.generate_gml(G)) + alnu = "#1234567890;&#x1234567890abcdef" + answer = ( + """graph [ + name "&#38;&#34;&#15;&#17476;&""" + + alnu + + """;&unknown;" +]""" + ) + assert answer == gml + + def test_exceptions(self): + pytest.raises(ValueError, literal_destringizer, "(") + pytest.raises(ValueError, literal_destringizer, "frozenset([1, 2, 3])") + pytest.raises(ValueError, literal_destringizer, literal_destringizer) + pytest.raises(ValueError, literal_stringizer, frozenset([1, 2, 3])) + pytest.raises(ValueError, literal_stringizer, literal_stringizer) + with tempfile.TemporaryFile() as f: + f.write(codecs.BOM_UTF8 + b"graph[]") + f.seek(0) + pytest.raises(nx.NetworkXError, nx.read_gml, f) + + def assert_parse_error(gml): + pytest.raises(nx.NetworkXError, nx.parse_gml, gml) + + assert_parse_error(["graph [\n\n", "]"]) + assert_parse_error("") + assert_parse_error('Creator ""') + assert_parse_error("0") + assert_parse_error("graph ]") + assert_parse_error("graph [ 1 ]") + assert_parse_error("graph [ 1.E+2 ]") + assert_parse_error('graph [ "A" ]') + assert_parse_error("graph [ ] graph ]") + assert_parse_error("graph [ ] graph [ ]") + assert_parse_error("graph [ data [1, 2, 3] ]") + assert_parse_error("graph [ node [ ] ]") + assert_parse_error("graph [ node [ id 0 ] ]") + nx.parse_gml('graph [ node [ id "a" ] ]', label="id") + assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 0 label 1 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] node [ id 1 label 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 ] ]") + nx.parse_gml("graph [edge [ source 0 target 0 ] node 
[ id 0 label 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 1 target 0 ] ]") + assert_parse_error("graph [ node [ id 0 label 0 ] edge [ source 0 target 1 ] ]") + assert_parse_error( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 1 target 0 ] ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 1 target 0 ] " + "directed 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 ] edge [ source 0 target 1 ]" + "multigraph 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 ]" + "multigraph 1 ]" + ) + assert_parse_error( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 0 target 1 key 0 ]" + "multigraph 1 ]" + ) + nx.parse_gml( + "graph [ node [ id 0 label 0 ] node [ id 1 label 1 ] " + "edge [ source 0 target 1 key 0 ] edge [ source 1 target 0 key 0 ]" + "directed 1 multigraph 1 ]" + ) + + # Tests for string convertible alphanumeric id and label values + nx.parse_gml("graph [edge [ source a target a ] node [ id a label b ] ]") + nx.parse_gml( + "graph [ node [ id n42 label 0 ] node [ id x43 label 1 ]" + "edge [ source n42 target x43 key 0 ]" + "edge [ source x43 target n42 key 0 ]" + "directed 1 multigraph 1 ]" + ) + assert_parse_error( + "graph [edge [ source u'u\4200' target u'u\4200' ] " + + "node [ id u'u\4200' label b ] ]" + ) + + def assert_generate_error(*args, **kwargs): + pytest.raises( + nx.NetworkXError, lambda: list(nx.generate_gml(*args, **kwargs)) + ) + + G = nx.Graph() + G.graph[3] = 3 + assert_generate_error(G) + G = nx.Graph() + G.graph["3"] = 3 + assert_generate_error(G) + G = nx.Graph() + G.graph["data"] = frozenset([1, 2, 3]) + assert_generate_error(G, stringizer=literal_stringizer) + + def test_label_kwarg(self): + G = nx.parse_gml(self.simple_data, label="id") + assert sorted(G.nodes) == [1, 2, 3] + labels = [G.nodes[n]["label"] for n in sorted(G.nodes)] + assert labels == ["Node 1", "Node 2", "Node 3"] + + G = nx.parse_gml(self.simple_data, label=None) + assert sorted(G.nodes) == [1, 2, 3] + labels = [G.nodes[n]["label"] for n in sorted(G.nodes)] + assert labels == ["Node 1", "Node 2", "Node 3"] + + def test_outofrange_integers(self): + # GML restricts integers to 32 signed bits. 
+ # Check that we honor this restriction on export + G = nx.Graph() + # Test export for numbers that barely fit or don't fit into 32 bits, + # and 3 numbers in the middle + numbers = { + "toosmall": (-(2**31)) - 1, + "small": -(2**31), + "med1": -4, + "med2": 0, + "med3": 17, + "big": (2**31) - 1, + "toobig": 2**31, + } + G.add_node("Node", **numbers) + + fd, fname = tempfile.mkstemp() + try: + nx.write_gml(G, fname) + # Check that the export wrote the nonfitting numbers as strings + G2 = nx.read_gml(fname) + for attr, value in G2.nodes["Node"].items(): + if attr == "toosmall" or attr == "toobig": + assert type(value) == str + else: + assert type(value) == int + finally: + os.close(fd) + os.unlink(fname) + + def test_multiline(self): + # example from issue #6836 + multiline_example = """ +graph +[ + node + [ + id 0 + label "multiline node" + label2 "multiline1 + multiline2 + multiline3" + alt_name "id 0" + ] +] +""" + G = nx.parse_gml(multiline_example) + assert G.nodes["multiline node"] == { + "label2": "multiline1 multiline2 multiline3", + "alt_name": "id 0", + } + + +@contextmanager +def byte_file(): + _file_handle = io.BytesIO() + yield _file_handle + _file_handle.seek(0) + + +class TestPropertyLists: + def test_writing_graph_with_multi_element_property_list(self): + g = nx.Graph() + g.add_node("n1", properties=["element", 0, 1, 2.5, True, False]) + with byte_file() as f: + nx.write_gml(g, f) + result = f.read().decode() + + assert result == dedent( + """\ + graph [ + node [ + id 0 + label "n1" + properties "element" + properties 0 + properties 1 + properties 2.5 + properties 1 + properties 0 + ] + ] + """ + ) + + def test_writing_graph_with_one_element_property_list(self): + g = nx.Graph() + g.add_node("n1", properties=["element"]) + with byte_file() as f: + nx.write_gml(g, f) + result = f.read().decode() + + assert result == dedent( + """\ + graph [ + node [ + id 0 + label "n1" + properties "_networkx_list_start" + properties "element" + ] + ] + """ + ) + + def test_reading_graph_with_list_property(self): + with byte_file() as f: + f.write( + dedent( + """ + graph [ + node [ + id 0 + label "n1" + properties "element" + properties 0 + properties 1 + properties 2.5 + ] + ] + """ + ).encode("ascii") + ) + f.seek(0) + graph = nx.read_gml(f) + assert graph.nodes(data=True)["n1"] == {"properties": ["element", 0, 1, 2.5]} + + def test_reading_graph_with_single_element_list_property(self): + with byte_file() as f: + f.write( + dedent( + """ + graph [ + node [ + id 0 + label "n1" + properties "_networkx_list_start" + properties "element" + ] + ] + """ + ).encode("ascii") + ) + f.seek(0) + graph = nx.read_gml(f) + assert graph.nodes(data=True)["n1"] == {"properties": ["element"]} + + +@pytest.mark.parametrize("coll", ([], ())) +def test_stringize_empty_list_tuple(coll): + G = nx.path_graph(2) + G.nodes[0]["test"] = coll # test serializing an empty collection + f = io.BytesIO() + nx.write_gml(G, f) # Smoke test - should not raise + f.seek(0) + H = nx.read_gml(f) + assert H.nodes["0"]["test"] == coll # Check empty list round-trips properly + # Check full round-tripping. Note that nodes are loaded as strings by + # default, so there needs to be some remapping prior to comparison + H = nx.relabel_nodes(H, {"0": 0, "1": 1}) + assert nx.utils.graphs_equal(G, H) + # Same as above, but use destringizer for node remapping. 
Should have no + # effect on node attr + f.seek(0) + H = nx.read_gml(f, destringizer=int) + assert nx.utils.graphs_equal(G, H) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_graph6.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_graph6.py new file mode 100644 index 0000000000000000000000000000000000000000..062a96f0b12a28251c365f2763f527eaf8337fe6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_graph6.py @@ -0,0 +1,169 @@ +import tempfile +from io import BytesIO + +import pytest + +import networkx as nx +import networkx.readwrite.graph6 as g6 +from networkx.utils import edges_equal, nodes_equal + + +class TestGraph6Utils: + def test_n_data_n_conversion(self): + for i in [0, 1, 42, 62, 63, 64, 258047, 258048, 7744773, 68719476735]: + assert g6.data_to_n(g6.n_to_data(i))[0] == i + assert g6.data_to_n(g6.n_to_data(i))[1] == [] + assert g6.data_to_n(g6.n_to_data(i) + [42, 43])[1] == [42, 43] + + +class TestFromGraph6Bytes: + def test_from_graph6_bytes(self): + data = b"DF{" + G = nx.from_graph6_bytes(data) + assert nodes_equal(G.nodes(), [0, 1, 2, 3, 4]) + assert edges_equal( + G.edges(), [(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + ) + + def test_read_equals_from_bytes(self): + data = b"DF{" + G = nx.from_graph6_bytes(data) + fh = BytesIO(data) + Gin = nx.read_graph6(fh) + assert nodes_equal(G.nodes(), Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + + +class TestReadGraph6: + def test_read_many_graph6(self): + """Test for reading many graphs from a file into a list.""" + data = b"DF{\nD`{\nDqK\nD~{\n" + fh = BytesIO(data) + glist = nx.read_graph6(fh) + assert len(glist) == 4 + for G in glist: + assert sorted(G) == list(range(5)) + + +class TestWriteGraph6: + """Unit tests for writing a graph to a file in graph6 format.""" + + def test_null_graph(self): + result = BytesIO() + nx.write_graph6(nx.null_graph(), result) + assert result.getvalue() == b">>graph6<>graph6<<@\n" + + def test_complete_graph(self): + result = BytesIO() + nx.write_graph6(nx.complete_graph(4), result) + assert result.getvalue() == b">>graph6<>graph6<>graph6<>graph6<>graph6<<@\n" + + def test_complete_graph(self): + assert g6.to_graph6_bytes(nx.complete_graph(4)) == b">>graph6<>graph6< + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + cls.simple_directed_graph = nx.DiGraph() + cls.simple_directed_graph.add_node("n10") + cls.simple_directed_graph.add_edge("n0", "n2", id="foo") + cls.simple_directed_graph.add_edge("n0", "n2") + cls.simple_directed_graph.add_edges_from( + [ + ("n1", "n2"), + ("n2", "n3"), + ("n3", "n5"), + ("n3", "n4"), + ("n4", "n6"), + ("n6", "n5"), + ("n5", "n7"), + ("n6", "n8"), + ("n8", "n7"), + ("n8", "n9"), + ] + ) + cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) + + cls.attribute_data = """ + + + yellow + + + + + green + + + + blue + + + red + + + + turquoise + + + 1.0 + + + 1.0 + + + 2.0 + + + + + + 1.1 + + + +""" + cls.attribute_graph = nx.DiGraph(id="G") + cls.attribute_graph.graph["node_default"] = {"color": "yellow"} + cls.attribute_graph.add_node("n0", color="green") + cls.attribute_graph.add_node("n2", color="blue") + cls.attribute_graph.add_node("n3", color="red") + cls.attribute_graph.add_node("n4") + cls.attribute_graph.add_node("n5", color="turquoise") + cls.attribute_graph.add_edge("n0", "n2", id="e0", weight=1.0) + cls.attribute_graph.add_edge("n0", "n1", id="e1", weight=1.0) + cls.attribute_graph.add_edge("n1", "n3", id="e2", weight=2.0) + 
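(Editor's aside; not part of the vendored test files.) A minimal sketch of the graph6 encoding asserted on in TestWriteGraph6 earlier in this hunk, assuming only the public to_graph6_bytes/from_graph6_bytes API:

import networkx as nx

data = nx.to_graph6_bytes(nx.complete_graph(4))  # optional ">>graph6<<" header, then the "C~" payload
assert data == b">>graph6<<C~\n"
H = nx.from_graph6_bytes(b"C~")  # decode the bare payload
assert sorted(H.edges()) == sorted(nx.complete_graph(4).edges())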
cls.attribute_graph.add_edge("n3", "n2", id="e3") + cls.attribute_graph.add_edge("n2", "n4", id="e4") + cls.attribute_graph.add_edge("n3", "n5", id="e5") + cls.attribute_graph.add_edge("n5", "n4", id="e6", weight=1.1) + cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) + + cls.node_attribute_default_data = """ + + false + 0 + 0 + 0.0 + 0.0 + Foo + + + + + + + """ + cls.node_attribute_default_graph = nx.DiGraph(id="G") + cls.node_attribute_default_graph.graph["node_default"] = { + "boolean_attribute": False, + "int_attribute": 0, + "long_attribute": 0, + "float_attribute": 0.0, + "double_attribute": 0.0, + "string_attribute": "Foo", + } + cls.node_attribute_default_graph.add_node("n0") + cls.node_attribute_default_graph.add_node("n1") + cls.node_attribute_default_graph.add_edge("n0", "n1", id="e0") + cls.node_attribute_default_fh = io.BytesIO( + cls.node_attribute_default_data.encode("UTF-8") + ) + + cls.attribute_named_key_ids_data = """ + + + + + + + val1 + val2 + + + val_one + val2 + + + edge_value + + + +""" + cls.attribute_named_key_ids_graph = nx.DiGraph() + cls.attribute_named_key_ids_graph.add_node("0", prop1="val1", prop2="val2") + cls.attribute_named_key_ids_graph.add_node("1", prop1="val_one", prop2="val2") + cls.attribute_named_key_ids_graph.add_edge("0", "1", edge_prop="edge_value") + fh = io.BytesIO(cls.attribute_named_key_ids_data.encode("UTF-8")) + cls.attribute_named_key_ids_fh = fh + + cls.attribute_numeric_type_data = """ + + + + + + 1 + + + 2.0 + + + 1 + + + k + + + 1.0 + + + +""" + cls.attribute_numeric_type_graph = nx.DiGraph() + cls.attribute_numeric_type_graph.add_node("n0", weight=1) + cls.attribute_numeric_type_graph.add_node("n1", weight=2.0) + cls.attribute_numeric_type_graph.add_edge("n0", "n1", weight=1) + cls.attribute_numeric_type_graph.add_edge("n1", "n1", weight=1.0) + fh = io.BytesIO(cls.attribute_numeric_type_data.encode("UTF-8")) + cls.attribute_numeric_type_fh = fh + + cls.simple_undirected_data = """ + + + + + + + + + + +""" + # + cls.simple_undirected_graph = nx.Graph() + cls.simple_undirected_graph.add_node("n10") + cls.simple_undirected_graph.add_edge("n0", "n2", id="foo") + cls.simple_undirected_graph.add_edges_from([("n1", "n2"), ("n2", "n3")]) + fh = io.BytesIO(cls.simple_undirected_data.encode("UTF-8")) + cls.simple_undirected_fh = fh + + cls.undirected_multigraph_data = """ + + + + + + + + + + +""" + cls.undirected_multigraph = nx.MultiGraph() + cls.undirected_multigraph.add_node("n10") + cls.undirected_multigraph.add_edge("n0", "n2", id="e0") + cls.undirected_multigraph.add_edge("n1", "n2", id="e1") + cls.undirected_multigraph.add_edge("n2", "n1", id="e2") + fh = io.BytesIO(cls.undirected_multigraph_data.encode("UTF-8")) + cls.undirected_multigraph_fh = fh + + cls.undirected_multigraph_no_multiedge_data = """ + + + + + + + + + + +""" + cls.undirected_multigraph_no_multiedge = nx.MultiGraph() + cls.undirected_multigraph_no_multiedge.add_node("n10") + cls.undirected_multigraph_no_multiedge.add_edge("n0", "n2", id="e0") + cls.undirected_multigraph_no_multiedge.add_edge("n1", "n2", id="e1") + cls.undirected_multigraph_no_multiedge.add_edge("n2", "n3", id="e2") + fh = io.BytesIO(cls.undirected_multigraph_no_multiedge_data.encode("UTF-8")) + cls.undirected_multigraph_no_multiedge_fh = fh + + cls.multigraph_only_ids_for_multiedges_data = """ + + + + + + + + + + +""" + cls.multigraph_only_ids_for_multiedges = nx.MultiGraph() + cls.multigraph_only_ids_for_multiedges.add_node("n10") + cls.multigraph_only_ids_for_multiedges.add_edge("n0", 
"n2") + cls.multigraph_only_ids_for_multiedges.add_edge("n1", "n2", id="e1") + cls.multigraph_only_ids_for_multiedges.add_edge("n2", "n1", id="e2") + fh = io.BytesIO(cls.multigraph_only_ids_for_multiedges_data.encode("UTF-8")) + cls.multigraph_only_ids_for_multiedges_fh = fh + + +class TestReadGraphML(BaseGraphML): + def test_read_simple_directed_graphml(self): + G = self.simple_directed_graph + H = nx.read_graphml(self.simple_directed_fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + PG = nx.parse_graphml(self.simple_directed_data) + assert sorted(G.nodes()) == sorted(PG.nodes()) + assert sorted(G.edges()) == sorted(PG.edges()) + assert sorted(G.edges(data=True)) == sorted(PG.edges(data=True)) + + def test_read_simple_undirected_graphml(self): + G = self.simple_undirected_graph + H = nx.read_graphml(self.simple_undirected_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.simple_undirected_fh.seek(0) + + PG = nx.parse_graphml(self.simple_undirected_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_graphml(self): + G = self.undirected_multigraph + H = nx.read_graphml(self.undirected_multigraph_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.undirected_multigraph_fh.seek(0) + + PG = nx.parse_graphml(self.undirected_multigraph_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_no_multiedge_graphml(self): + G = self.undirected_multigraph_no_multiedge + H = nx.read_graphml(self.undirected_multigraph_no_multiedge_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.undirected_multigraph_no_multiedge_fh.seek(0) + + PG = nx.parse_graphml(self.undirected_multigraph_no_multiedge_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_undirected_multigraph_only_ids_for_multiedges_graphml(self): + G = self.multigraph_only_ids_for_multiedges + H = nx.read_graphml(self.multigraph_only_ids_for_multiedges_fh) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + self.multigraph_only_ids_for_multiedges_fh.seek(0) + + PG = nx.parse_graphml(self.multigraph_only_ids_for_multiedges_data) + assert nodes_equal(G.nodes(), PG.nodes()) + assert edges_equal(G.edges(), PG.edges()) + + def test_read_attribute_graphml(self): + G = self.attribute_graph + H = nx.read_graphml(self.attribute_fh) + assert nodes_equal(G.nodes(True), sorted(H.nodes(data=True))) + ge = sorted(G.edges(data=True)) + he = sorted(H.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + self.attribute_fh.seek(0) + + PG = nx.parse_graphml(self.attribute_data) + assert sorted(G.nodes(True)) == sorted(PG.nodes(data=True)) + ge = sorted(G.edges(data=True)) + he = sorted(PG.edges(data=True)) + for a, b in zip(ge, he): + assert a == b + + def test_node_default_attribute_graphml(self): + G = self.node_attribute_default_graph + H = nx.read_graphml(self.node_attribute_default_fh) + assert G.graph["node_default"] == H.graph["node_default"] + + def test_directed_edge_in_undirected(self): + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + 
pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_undirected_edge_in_directed(self): + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_key_raise(self): + s = """ + + + yellow + + + + + green + + + + blue + + + 1.0 + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_hyperedge_raise(self): + s = """ + + + yellow + + + + + green + + + + blue + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, s) + + def test_multigraph_keys(self): + # Test that reading multigraphs uses edge id attributes as keys + s = """ + + + + + + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + expected = [("n0", "n1", "e0"), ("n0", "n1", "e1")] + assert sorted(G.edges(keys=True)) == expected + fh.seek(0) + H = nx.parse_graphml(s) + assert sorted(H.edges(keys=True)) == expected + + def test_preserve_multi_edge_data(self): + """ + Test that data and keys of edges are preserved on consequent + write and reads + """ + G = nx.MultiGraph() + G.add_node(1) + G.add_node(2) + G.add_edges_from( + [ + # edges with no data, no keys: + (1, 2), + # edges with only data: + (1, 2, {"key": "data_key1"}), + (1, 2, {"id": "data_id2"}), + (1, 2, {"key": "data_key3", "id": "data_id3"}), + # edges with both data and keys: + (1, 2, 103, {"key": "data_key4"}), + (1, 2, 104, {"id": "data_id5"}), + (1, 2, 105, {"key": "data_key6", "id": "data_id7"}), + ] + ) + fh = io.BytesIO() + nx.write_graphml(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert edges_equal(G.edges(data=True, keys=True), H.edges(data=True, keys=True)) + assert G._adj == H._adj + + Gadj = { + str(node): { + str(nbr): {str(ekey): dd for ekey, dd in key_dict.items()} + for nbr, key_dict in nbr_dict.items() + } + for node, nbr_dict in G._adj.items() + } + fh.seek(0) + HH = nx.read_graphml(fh, node_type=str, edge_key_type=str) + assert Gadj == HH._adj + + fh.seek(0) + string_fh = fh.read() + HH = nx.parse_graphml(string_fh, node_type=str, edge_key_type=str) + assert Gadj == HH._adj + + def test_yfiles_extension(self): + data = """ + + + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + 2 + + + + + + + + + + + + 3 + + + + + + + + + + + + + + + + + + + + +""" + fh = io.BytesIO(data.encode("UTF-8")) + G = nx.read_graphml(fh, force_multigraph=True) + assert list(G.edges()) == [("n0", "n1")] + assert G.has_edge("n0", "n1", key="e0") + assert G.nodes["n0"]["label"] == "1" + assert G.nodes["n1"]["label"] == "2" + assert G.nodes["n2"]["label"] == "3" + assert G.nodes["n0"]["shape_type"] == "rectangle" + assert G.nodes["n1"]["shape_type"] == "rectangle" + assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator" + assert G.nodes["n2"]["description"] == "description\nline1\nline2" + fh.seek(0) + G = nx.read_graphml(fh) + assert list(G.edges()) == [("n0", "n1")] + assert G["n0"]["n1"]["id"] == "e0" + assert G.nodes["n0"]["label"] == "1" + assert G.nodes["n1"]["label"] == "2" + assert G.nodes["n2"]["label"] == "3" + assert G.nodes["n0"]["shape_type"] == "rectangle" + assert G.nodes["n1"]["shape_type"] == "rectangle" + assert G.nodes["n2"]["shape_type"] == "com.yworks.flowchart.terminator" + assert G.nodes["n2"]["description"] == 
"description\nline1\nline2" + + H = nx.parse_graphml(data, force_multigraph=True) + assert list(H.edges()) == [("n0", "n1")] + assert H.has_edge("n0", "n1", key="e0") + assert H.nodes["n0"]["label"] == "1" + assert H.nodes["n1"]["label"] == "2" + assert H.nodes["n2"]["label"] == "3" + + H = nx.parse_graphml(data) + assert list(H.edges()) == [("n0", "n1")] + assert H["n0"]["n1"]["id"] == "e0" + assert H.nodes["n0"]["label"] == "1" + assert H.nodes["n1"]["label"] == "2" + assert H.nodes["n2"]["label"] == "3" + + def test_bool(self): + s = """ + + + false + + + + true + + + + false + + + FaLsE + + + True + + + 0 + + + 1 + + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert graph.nodes["n0"]["test"] + assert not graph.nodes["n2"]["test"] + assert not graph.nodes["n3"]["test"] + assert graph.nodes["n4"]["test"] + assert not graph.nodes["n5"]["test"] + assert graph.nodes["n6"]["test"] + + def test_graphml_header_line(self): + good = """ + + + false + + + + true + + + +""" + bad = """ + + + false + + + + true + + + +""" + ugly = """ + + + false + + + + true + + + +""" + for s in (good, bad): + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + H = nx.parse_graphml(s) + for graph in [G, H]: + assert graph.nodes["n0"]["test"] + + fh = io.BytesIO(ugly.encode("UTF-8")) + pytest.raises(nx.NetworkXError, nx.read_graphml, fh) + pytest.raises(nx.NetworkXError, nx.parse_graphml, ugly) + + def test_read_attributes_with_groups(self): + data = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2 + + + + + + + + + + + + + + + + + + + + + + Group 3 + + + + + + + + + + Folder 3 + + + + + + + + + + + + + + + + + + + + + Group 1 + + + + + + + + + + Folder 1 + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + 3 + + + + + + + + + + + + + + + + + + + + + + + + Group 2 + + + + + + + + + + Folder 2 + + + + + + + + + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + 6 + + + + + + + + + + + + + + + + + + + + + + + 9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + # verify that nodes / attributes are correctly read when part of a group + fh = io.BytesIO(data.encode("UTF-8")) + G = nx.read_graphml(fh) + data = [x for _, x in G.nodes(data=True)] + assert len(data) == 9 + for node_data in data: + assert node_data["CustomProperty"] != "" + + def test_long_attribute_type(self): + # test that graphs with attr.type="long" (as produced by botch and + # dose3) can be parsed + s = """ + + + + + 4284 + + +""" + fh = io.BytesIO(s.encode("UTF-8")) + G = nx.read_graphml(fh) + expected = [("n1", {"cudfversion": 4284})] + assert sorted(G.nodes(data=True)) == expected + fh.seek(0) + H = nx.parse_graphml(s) + assert sorted(H.nodes(data=True)) == expected + + +class TestWriteGraphML(BaseGraphML): + writer = staticmethod(nx.write_graphml_lxml) + + @classmethod + def setup_class(cls): + BaseGraphML.setup_class() + _ = pytest.importorskip("lxml.etree") + + def test_write_interface(self): + try: + import lxml.etree + + assert nx.write_graphml == nx.write_graphml_lxml + except ImportError: + assert nx.write_graphml == nx.write_graphml_xml + + def test_write_read_simple_directed_graphml(self): + G = self.simple_directed_graph + G.graph["hi"] = "there" + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert 
sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_GraphMLWriter_add_graphs(self): + gmlw = GraphMLWriter() + G = self.simple_directed_graph + H = G.copy() + gmlw.add_graphs([G, H]) + + def test_write_read_simple_no_prettyprint(self): + G = self.simple_directed_graph + G.graph["hi"] = "there" + G.graph["id"] = "1" + fh = io.BytesIO() + self.writer(G, fh, prettyprint=False) + fh.seek(0) + H = nx.read_graphml(fh) + assert sorted(G.nodes()) == sorted(H.nodes()) + assert sorted(G.edges()) == sorted(H.edges()) + assert sorted(G.edges(data=True)) == sorted(H.edges(data=True)) + self.simple_directed_fh.seek(0) + + def test_write_read_attribute_named_key_ids_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_named_key_ids_graph + fh = io.BytesIO() + self.writer(G, fh, named_key_ids=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_named_key_ids_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = list(xml.getroot()) + assert len(children) == 4 + + keys = [child.items() for child in children[:3]] + + assert len(keys) == 3 + assert ("id", "edge_prop") in keys[0] + assert ("attr.name", "edge_prop") in keys[0] + assert ("id", "prop2") in keys[1] + assert ("attr.name", "prop2") in keys[1] + assert ("id", "prop1") in keys[2] + assert ("attr.name", "prop1") in keys[2] + + # Confirm the read graph nodes/edge are identical when compared to + # default writing behavior. + default_behavior_fh = io.BytesIO() + nx.write_graphml(G, default_behavior_fh) + default_behavior_fh.seek(0) + H = nx.read_graphml(default_behavior_fh) + + named_key_ids_behavior_fh = io.BytesIO() + nx.write_graphml(G, named_key_ids_behavior_fh, named_key_ids=True) + named_key_ids_behavior_fh.seek(0) + J = nx.read_graphml(named_key_ids_behavior_fh) + + assert all(n1 == n2 for (n1, n2) in zip(H.nodes, J.nodes)) + assert all(e1 == e2 for (e1, e2) in zip(H.edges, J.edges)) + + def test_write_read_attribute_numeric_type_graphml(self): + from xml.etree.ElementTree import parse + + G = self.attribute_numeric_type_graph + fh = io.BytesIO() + self.writer(G, fh, infer_numeric_types=True) + fh.seek(0) + H = nx.read_graphml(fh) + fh.seek(0) + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert edges_equal(G.edges(data=True), H.edges(data=True)) + self.attribute_numeric_type_fh.seek(0) + + xml = parse(fh) + # Children are the key elements, and the graph element + children = list(xml.getroot()) + assert len(children) == 3 + + keys = [child.items() for child in children[:2]] + + assert len(keys) == 2 + assert ("attr.type", "double") in keys[0] + assert ("attr.type", "double") in keys[1] + + def test_more_multigraph_keys(self): + """Writing keys as edge id attributes means keys become strings. + The original keys are stored as data, so read them back in + if `str(key) == edge_id` + This allows the adjacency to remain the same. 
+ """ + G = nx.MultiGraph() + G.add_edges_from([("a", "b", 2), ("a", "b", 3)]) + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname) + assert H.is_multigraph() + assert edges_equal(G.edges(keys=True), H.edges(keys=True)) + assert G._adj == H._adj + os.close(fd) + os.unlink(fname) + + def test_default_attribute(self): + G = nx.Graph(name="Fred") + G.add_node(1, label=1, color="green") + nx.add_path(G, [0, 1, 2, 3]) + G.add_edge(1, 2, weight=3) + G.graph["node_default"] = {"color": "yellow"} + G.graph["edge_default"] = {"weight": 7} + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh, node_type=int) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert G.graph == H.graph + + def test_mixed_type_attributes(self): + G = nx.MultiGraph() + G.add_node("n0", special=False) + G.add_node("n1", special=0) + G.add_edge("n0", "n1", special=False) + G.add_edge("n0", "n1", special=0) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert not H.nodes["n0"]["special"] + assert H.nodes["n1"]["special"] == 0 + assert not H.edges["n0", "n1", 0]["special"] + assert H.edges["n0", "n1", 1]["special"] == 0 + + def test_str_number_mixed_type_attributes(self): + G = nx.MultiGraph() + G.add_node("n0", special="hello") + G.add_node("n1", special=0) + G.add_edge("n0", "n1", special="hello") + G.add_edge("n0", "n1", special=0) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert H.nodes["n0"]["special"] == "hello" + assert H.nodes["n1"]["special"] == 0 + assert H.edges["n0", "n1", 0]["special"] == "hello" + assert H.edges["n0", "n1", 1]["special"] == 0 + + def test_mixed_int_type_number_attributes(self): + np = pytest.importorskip("numpy") + G = nx.MultiGraph() + G.add_node("n0", special=np.int64(0)) + G.add_node("n1", special=1) + G.add_edge("n0", "n1", special=np.int64(2)) + G.add_edge("n0", "n1", special=3) + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert H.nodes["n0"]["special"] == 0 + assert H.nodes["n1"]["special"] == 1 + assert H.edges["n0", "n1", 0]["special"] == 2 + assert H.edges["n0", "n1", 1]["special"] == 3 + + def test_multigraph_to_graph(self): + # test converting multigraph to graph if no parallel edges found + G = nx.MultiGraph() + G.add_edges_from([("a", "b", 2), ("b", "c", 3)]) # no multiedges + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname) + assert not H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + os.close(fd) + os.unlink(fname) + + # add a multiedge + G.add_edge("a", "b", "e-id") + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname) + assert H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + os.close(fd) + os.unlink(fname) + + def test_write_generate_edge_id_from_attribute(self): + from xml.etree.ElementTree import parse + + G = nx.Graph() + G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")]) + edge_attributes = {e: str(e) for e in G.edges} + nx.set_edge_attributes(G, edge_attributes, "eid") + fd, fname = tempfile.mkstemp() + # set edge_id_from_attribute e.g. "eid" for write_graphml() + self.writer(G, fname, edge_id_from_attribute="eid") + # set edge_id_from_attribute e.g. 
"eid" for generate_graphml() + generator = nx.generate_graphml(G, edge_id_from_attribute="eid") + + H = nx.read_graphml(fname) + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + # NetworkX adds explicit edge "id" from file as attribute + nx.set_edge_attributes(G, edge_attributes, "id") + assert edges_equal(G.edges(data=True), H.edges(data=True)) + + tree = parse(fname) + children = list(tree.getroot()) + assert len(children) == 2 + edge_ids = [ + edge.attrib["id"] + for edge in tree.getroot().findall( + ".//{http://graphml.graphdrawing.org/xmlns}edge" + ) + ] + # verify edge id value is equal to specified attribute value + assert sorted(edge_ids) == sorted(edge_attributes.values()) + + # check graphml generated from generate_graphml() + data = "".join(generator) + J = nx.parse_graphml(data) + assert sorted(G.nodes()) == sorted(J.nodes()) + assert sorted(G.edges()) == sorted(J.edges()) + # NetworkX adds explicit edge "id" from file as attribute + nx.set_edge_attributes(G, edge_attributes, "id") + assert edges_equal(G.edges(data=True), J.edges(data=True)) + + os.close(fd) + os.unlink(fname) + + def test_multigraph_write_generate_edge_id_from_attribute(self): + from xml.etree.ElementTree import parse + + G = nx.MultiGraph() + G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c"), ("a", "b")]) + edge_attributes = {e: str(e) for e in G.edges} + nx.set_edge_attributes(G, edge_attributes, "eid") + fd, fname = tempfile.mkstemp() + # set edge_id_from_attribute e.g. "eid" for write_graphml() + self.writer(G, fname, edge_id_from_attribute="eid") + # set edge_id_from_attribute e.g. "eid" for generate_graphml() + generator = nx.generate_graphml(G, edge_id_from_attribute="eid") + + H = nx.read_graphml(fname) + assert H.is_multigraph() + H = nx.read_graphml(fname, force_multigraph=True) + assert H.is_multigraph() + + assert nodes_equal(G.nodes(), H.nodes()) + assert edges_equal(G.edges(), H.edges()) + assert sorted(data.get("eid") for u, v, data in H.edges(data=True)) == sorted( + edge_attributes.values() + ) + # NetworkX uses edge_ids as keys in multigraphs if no key + assert sorted(key for u, v, key in H.edges(keys=True)) == sorted( + edge_attributes.values() + ) + + tree = parse(fname) + children = list(tree.getroot()) + assert len(children) == 2 + edge_ids = [ + edge.attrib["id"] + for edge in tree.getroot().findall( + ".//{http://graphml.graphdrawing.org/xmlns}edge" + ) + ] + # verify edge id value is equal to specified attribute value + assert sorted(edge_ids) == sorted(edge_attributes.values()) + + # check graphml generated from generate_graphml() + graphml_data = "".join(generator) + J = nx.parse_graphml(graphml_data) + assert J.is_multigraph() + + assert nodes_equal(G.nodes(), J.nodes()) + assert edges_equal(G.edges(), J.edges()) + assert sorted(data.get("eid") for u, v, data in J.edges(data=True)) == sorted( + edge_attributes.values() + ) + # NetworkX uses edge_ids as keys in multigraphs if no key + assert sorted(key for u, v, key in J.edges(keys=True)) == sorted( + edge_attributes.values() + ) + + os.close(fd) + os.unlink(fname) + + def test_numpy_float64(self): + np = pytest.importorskip("numpy") + wt = np.float64(3.4) + G = nx.Graph([(1, 2, {"weight": wt})]) + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=int) + assert G.edges == H.edges + wtG = G[1][2]["weight"] + wtH = H[1][2]["weight"] + assert wtG == pytest.approx(wtH, abs=1e-6) + assert type(wtG) == np.float64 + assert type(wtH) == float + os.close(fd) + 
os.unlink(fname) + + def test_numpy_float32(self): + np = pytest.importorskip("numpy") + wt = np.float32(3.4) + G = nx.Graph([(1, 2, {"weight": wt})]) + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=int) + assert G.edges == H.edges + wtG = G[1][2]["weight"] + wtH = H[1][2]["weight"] + assert wtG == pytest.approx(wtH, abs=1e-6) + assert type(wtG) == np.float32 + assert type(wtH) == float + os.close(fd) + os.unlink(fname) + + def test_numpy_float64_inference(self): + np = pytest.importorskip("numpy") + G = self.attribute_numeric_type_graph + G.edges[("n1", "n1")]["weight"] = np.float64(1.1) + fd, fname = tempfile.mkstemp() + self.writer(G, fname, infer_numeric_types=True) + H = nx.read_graphml(fname) + assert G._adj == H._adj + os.close(fd) + os.unlink(fname) + + def test_unicode_attributes(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + node_type = str + G.add_edge(name1, "Radiohead", foo=name2) + fd, fname = tempfile.mkstemp() + self.writer(G, fname) + H = nx.read_graphml(fname, node_type=node_type) + assert G._adj == H._adj + os.close(fd) + os.unlink(fname) + + def test_unicode_escape(self): + # test for handling json escaped strings in python 2 Issue #1880 + import json + + a = {"a": '{"a": "123"}'} # an object with many chars to escape + sa = json.dumps(a) + G = nx.Graph() + G.graph["test"] = sa + fh = io.BytesIO() + self.writer(G, fh) + fh.seek(0) + H = nx.read_graphml(fh) + assert G.graph["test"] == H.graph["test"] + + +class TestXMLGraphML(TestWriteGraphML): + writer = staticmethod(nx.write_graphml_xml) + + @classmethod + def setup_class(cls): + TestWriteGraphML.setup_class() + + +def test_exception_for_unsupported_datatype_node_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. a list""" + pytest.importorskip("lxml.etree") + # node attribute + G = nx.Graph() + G.add_node(0, my_list_attribute=[0, 1, 2]) + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_exception_for_unsupported_datatype_edge_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. a list""" + pytest.importorskip("lxml.etree") + # edge attribute + G = nx.Graph() + G.add_edge(0, 1, my_list_attribute=[0, 1, 2]) + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) + + +def test_exception_for_unsupported_datatype_graph_attr(): + """Test that a detailed exception is raised when an attribute is of a type + not supported by GraphML, e.g. 
a list""" + pytest.importorskip("lxml.etree") + # graph attribute + G = nx.Graph() + G.graph["my_list_attribute"] = [0, 1, 2] + fh = io.BytesIO() + with pytest.raises(TypeError, match="GraphML does not support"): + nx.write_graphml(G, fh) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_leda.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_leda.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac5ecc34bf9b42bd49e316bdc72e0e56c76a616 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_leda.py @@ -0,0 +1,30 @@ +import io + +import networkx as nx + + +class TestLEDA: + def test_parse_leda(self): + data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|""" + G = nx.parse_leda(data) + G = nx.parse_leda(data.split("\n")) + assert sorted(G.nodes()) == ["v1", "v2", "v3", "v4", "v5"] + assert sorted(G.edges(data=True)) == [ + ("v1", "v2", {"label": "4"}), + ("v1", "v3", {"label": "3"}), + ("v2", "v3", {"label": "2"}), + ("v3", "v4", {"label": "3"}), + ("v3", "v5", {"label": "7"}), + ("v4", "v5", {"label": "6"}), + ("v5", "v1", {"label": "foo"}), + ] + + def test_read_LEDA(self): + fh = io.BytesIO() + data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|""" + G = nx.parse_leda(data) + fh.write(data.encode("UTF-8")) + fh.seek(0) + Gin = nx.read_leda(fh) + assert sorted(G.nodes()) == sorted(Gin.nodes()) + assert sorted(G.edges()) == sorted(Gin.edges()) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_p2g.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_p2g.py new file mode 100644 index 0000000000000000000000000000000000000000..e4c50de7f382f62d4ae6e0cc0443e480487c65e2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_p2g.py @@ -0,0 +1,62 @@ +import io + +import networkx as nx +from networkx.readwrite.p2g import read_p2g, write_p2g +from networkx.utils import edges_equal + + +class TestP2G: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_node("g") + cls.DG = nx.DiGraph(cls.G) + + def test_read_p2g(self): + s = b"""\ +name +3 4 +a +1 2 +b + +c +0 2 +""" + bytesIO = io.BytesIO(s) + G = read_p2g(bytesIO) + assert G.name == "name" + assert sorted(G) == ["a", "b", "c"] + edges = [(str(u), str(v)) for u, v in G.edges()] + assert edges_equal(G.edges(), [("a", "c"), ("a", "b"), ("c", "a"), ("c", "c")]) + + def test_write_p2g(self): + s = b"""foo +3 2 +1 +1 +2 +2 +3 + +""" + fh = io.BytesIO() + G = nx.DiGraph() + G.name = "foo" + G.add_edges_from([(1, 2), (2, 3)]) + write_p2g(G, fh) + fh.seek(0) + r = fh.read() + assert r == s + + def test_write_read_p2g(self): + fh = io.BytesIO() + G = nx.DiGraph() + G.name = "foo" + G.add_edges_from([("a", "b"), ("b", "c")]) + write_p2g(G, fh) + fh.seek(0) + H = read_p2g(fh) + assert edges_equal(G.edges(), H.edges()) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_pajek.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_pajek.py new file mode 100644 index 
0000000000000000000000000000000000000000..e617c3fc75c2de8c635ef2a963bd61b33286fcb0 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_pajek.py @@ -0,0 +1,130 @@ +""" +Pajek tests +""" +import os +import tempfile + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPajek: + @classmethod + def setup_class(cls): + cls.data = """*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180""" + cls.G = nx.MultiDiGraph() + cls.G.add_nodes_from(["A1", "Bb", "C", "D2"]) + cls.G.add_edges_from( + [ + ("A1", "A1"), + ("A1", "Bb"), + ("A1", "C"), + ("Bb", "A1"), + ("C", "C"), + ("C", "D2"), + ("D2", "Bb"), + ] + ) + + cls.G.graph["name"] = "Tralala" + (fd, cls.fname) = tempfile.mkstemp() + with os.fdopen(fd, "wb") as fh: + fh.write(cls.data.encode("UTF-8")) + + @classmethod + def teardown_class(cls): + os.unlink(cls.fname) + + def test_parse_pajek_simple(self): + # Example without node positions or shape + data = """*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1""" + G = nx.parse_pajek(data) + assert sorted(G.nodes()) == ["1", "2"] + assert edges_equal(G.edges(), [("1", "2"), ("1", "2")]) + + def test_parse_pajek(self): + G = nx.parse_pajek(self.data) + assert sorted(G.nodes()) == ["A1", "Bb", "C", "D2"] + assert edges_equal( + G.edges(), + [ + ("A1", "A1"), + ("A1", "Bb"), + ("A1", "C"), + ("Bb", "A1"), + ("C", "C"), + ("C", "D2"), + ("D2", "Bb"), + ], + ) + + def test_parse_pajek_mat(self): + data = """*Vertices 3\n1 "one"\n2 "two"\n3 "three"\n*Matrix\n1 1 0\n0 1 0\n0 1 0\n""" + G = nx.parse_pajek(data) + assert set(G.nodes()) == {"one", "two", "three"} + assert G.nodes["two"] == {"id": "2"} + assert edges_equal( + set(G.edges()), + {("one", "one"), ("two", "one"), ("two", "two"), ("two", "three")}, + ) + + def test_read_pajek(self): + G = nx.parse_pajek(self.data) + Gin = nx.read_pajek(self.fname) + assert sorted(G.nodes()) == sorted(Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + assert self.G.graph == Gin.graph + for n in G: + assert G.nodes[n] == Gin.nodes[n] + + def test_write_pajek(self): + import io + + G = nx.parse_pajek(self.data) + fh = io.BytesIO() + nx.write_pajek(G, fh) + fh.seek(0) + H = nx.read_pajek(fh) + assert nodes_equal(list(G), list(H)) + assert edges_equal(list(G.edges()), list(H.edges())) + # Graph name is left out for now, therefore it is not tested. 
+ # assert_equal(G.graph, H.graph) + + def test_ignored_attribute(self): + import io + + G = nx.Graph() + fh = io.BytesIO() + G.add_node(1, int_attr=1) + G.add_node(2, empty_attr=" ") + G.add_edge(1, 2, int_attr=2) + G.add_edge(2, 3, empty_attr=" ") + + import warnings + + with warnings.catch_warnings(record=True) as w: + nx.write_pajek(G, fh) + assert len(w) == 4 + + def test_noname(self): + # Make sure we can parse a line such as: *network + # Issue #952 + line = "*network\n" + other_lines = self.data.split("\n")[1:] + data = line + "\n".join(other_lines) + G = nx.parse_pajek(data) + + def test_unicode(self): + import io + + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", foo=name2) + fh = io.BytesIO() + nx.write_pajek(G, fh) + fh.seek(0) + H = nx.read_pajek(fh) + assert nodes_equal(list(G), list(H)) + assert edges_equal(list(G.edges()), list(H.edges())) + assert G.graph == H.graph diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_sparse6.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_sparse6.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b5e4de93ed16139c4766c3127f9397e0a4ff39 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_sparse6.py @@ -0,0 +1,173 @@ +import tempfile +from io import BytesIO + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestSparseGraph6: + def test_from_sparse6_bytes(self): + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM" + G = nx.from_sparse6_bytes(data) + assert nodes_equal( + sorted(G.nodes()), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + ) + assert edges_equal( + G.edges(), + [ + (0, 1), + (0, 2), + (0, 3), + (1, 12), + (1, 14), + (2, 13), + (2, 15), + (3, 16), + (3, 17), + (4, 7), + (4, 9), + (4, 11), + (5, 6), + (5, 8), + (5, 9), + (6, 10), + (6, 11), + (7, 8), + (7, 10), + (8, 12), + (9, 15), + (10, 14), + (11, 13), + (12, 16), + (13, 17), + (14, 17), + (15, 16), + ], + ) + + def test_from_bytes_multigraph_graph(self): + graph_data = b":An" + G = nx.from_sparse6_bytes(graph_data) + assert type(G) == nx.Graph + multigraph_data = b":Ab" + M = nx.from_sparse6_bytes(multigraph_data) + assert type(M) == nx.MultiGraph + + def test_read_sparse6(self): + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM" + G = nx.from_sparse6_bytes(data) + fh = BytesIO(data) + Gin = nx.read_sparse6(fh) + assert nodes_equal(G.nodes(), Gin.nodes()) + assert edges_equal(G.edges(), Gin.edges()) + + def test_read_many_graph6(self): + # Read many graphs into list + data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n" b":Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM" + fh = BytesIO(data) + glist = nx.read_sparse6(fh) + assert len(glist) == 2 + for G in glist: + assert nodes_equal( + G.nodes(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + ) + + +class TestWriteSparse6: + """Unit tests for writing graphs in the sparse6 format. + + Most of the test cases were checked against the sparse6 encoder in Sage. 
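+ 
+     For orientation: a sparse6 payload always begins with ``:``, and
+     ``write_sparse6`` prepends the optional ``>>sparse6<<`` header unless
+     called with ``header=False``, so the one-node trivial graph serializes
+     to ``>>sparse6<<:@`` (see ``test_trivial_graph`` below).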
+ + """ + + def test_null_graph(self): + G = nx.null_graph() + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:?\n" + + def test_trivial_graph(self): + G = nx.trivial_graph() + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:@\n" + + def test_empty_graph(self): + G = nx.empty_graph(5) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:D\n" + + def test_large_empty_graph(self): + G = nx.empty_graph(68) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:~?@C\n" + + def test_very_large_empty_graph(self): + G = nx.empty_graph(258049) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:~~???~?@\n" + + def test_complete_graph(self): + G = nx.complete_graph(4) + result = BytesIO() + nx.write_sparse6(G, result) + assert result.getvalue() == b">>sparse6<<:CcKI\n" + + def test_no_header(self): + G = nx.complete_graph(4) + result = BytesIO() + nx.write_sparse6(G, result, header=False) + assert result.getvalue() == b":CcKI\n" + + def test_padding(self): + codes = (b":Cdv", b":DaYn", b":EaYnN", b":FaYnL", b":GaYnLz") + for n, code in enumerate(codes, start=4): + G = nx.path_graph(n) + result = BytesIO() + nx.write_sparse6(G, result, header=False) + assert result.getvalue() == code + b"\n" + + def test_complete_bipartite(self): + G = nx.complete_bipartite_graph(6, 9) + result = BytesIO() + nx.write_sparse6(G, result) + # Compared with sage + expected = b">>sparse6<<:Nk" + b"?G`cJ" * 9 + b"\n" + assert result.getvalue() == expected + + def test_read_write_inverse(self): + for i in list(range(13)) + [31, 47, 62, 63, 64, 72]: + m = min(2 * i, i * i // 2) + g = nx.random_graphs.gnm_random_graph(i, m, seed=i) + gstr = BytesIO() + nx.write_sparse6(g, gstr, header=False) + # Strip the trailing newline. + gstr = gstr.getvalue().rstrip() + g2 = nx.from_sparse6_bytes(gstr) + assert g2.order() == g.order() + assert edges_equal(g2.edges(), g.edges()) + + def test_no_directed_graphs(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.write_sparse6(nx.DiGraph(), BytesIO()) + + def test_write_path(self): + # On Windows, we can't reopen a file that is open + # So, for test we get a valid name from tempfile but close it. 
+ with tempfile.NamedTemporaryFile() as f: + fullfilename = f.name + # file should be closed now, so write_sparse6 can open it + nx.write_sparse6(nx.null_graph(), fullfilename) + fh = open(fullfilename, mode="rb") + assert fh.read() == b">>sparse6<<:?\n" + fh.close() + import os + + os.remove(fullfilename) diff --git a/phivenv/Lib/site-packages/networkx/readwrite/tests/test_text.py b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..0f788280d6d1e736910c9c9cf19abd96a48a1cba --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/tests/test_text.py @@ -0,0 +1,1809 @@ +import random +from itertools import product +from textwrap import dedent + +import pytest + +import networkx as nx + + +def test_forest_str_directed(): + # Create a directed forest with labels + graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + for node in graph.nodes: + graph.nodes[node]["label"] = "node_" + chr(ord("a") + node) + + node_target = dedent( + """ + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + └─╼ 6 + """ + ).strip() + + label_target = dedent( + """ + ╙── node_a + ├─╼ node_b + │ ├─╼ node_d + │ └─╼ node_e + └─╼ node_c + ├─╼ node_f + └─╼ node_g + """ + ).strip() + + # Basic node case + ret = nx.forest_str(graph, with_labels=False) + print(ret) + assert ret == node_target + + # Basic label case + ret = nx.forest_str(graph, with_labels=True) + print(ret) + assert ret == label_target + + # Custom write function case + lines = [] + ret = nx.forest_str(graph, write=lines.append, with_labels=False) + assert ret is None + assert lines == node_target.split("\n") + + # Smoke test to ensure passing the print function works. To properly test + # this case we would need to capture stdout. 
(for potential reference + # implementation see :class:`ubelt.util_stream.CaptureStdout`) + ret = nx.forest_str(graph, write=print) + assert ret is None + + +def test_write_network_text_empty_graph(): + def _graph_str(g, **kw): + printbuf = [] + nx.write_network_text(g, printbuf.append, end="", **kw) + return "\n".join(printbuf) + + assert _graph_str(nx.DiGraph()) == "╙" + assert _graph_str(nx.Graph()) == "╙" + assert _graph_str(nx.DiGraph(), ascii_only=True) == "+" + assert _graph_str(nx.Graph(), ascii_only=True) == "+" + + +def test_write_network_text_within_forest_glyph(): + g = nx.DiGraph() + g.add_nodes_from([1, 2, 3, 4]) + g.add_edge(2, 4) + lines = [] + write = lines.append + nx.write_network_text(g, path=write, end="") + nx.write_network_text(g, path=write, ascii_only=True, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╟── 1 + ╟── 2 + ╎ └─╼ 4 + ╙── 3 + +-- 1 + +-- 2 + : L-> 4 + +-- 3 + """ + ).strip() + assert text == target + + +def test_forest_str_directed_multi_tree(): + tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + forest = nx.disjoint_union_all([tree1, tree2]) + ret = nx.forest_str(forest) + print(ret) + + target = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ │ ├─╼ 3 + ╎ │ └─╼ 4 + ╎ └─╼ 2 + ╎ ├─╼ 5 + ╎ └─╼ 6 + ╙── 7 + ├─╼ 8 + │ ├─╼ 10 + │ └─╼ 11 + └─╼ 9 + ├─╼ 12 + └─╼ 13 + """ + ).strip() + assert ret == target + + tree3 = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + forest = nx.disjoint_union_all([tree1, tree2, tree3]) + ret = nx.forest_str(forest, sources=[0, 14, 7]) + print(ret) + + target = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ │ ├─╼ 3 + ╎ │ └─╼ 4 + ╎ └─╼ 2 + ╎ ├─╼ 5 + ╎ └─╼ 6 + ╟── 14 + ╎ ├─╼ 15 + ╎ │ ├─╼ 17 + ╎ │ └─╼ 18 + ╎ └─╼ 16 + ╎ ├─╼ 19 + ╎ └─╼ 20 + ╙── 7 + ├─╼ 8 + │ ├─╼ 10 + │ └─╼ 11 + └─╼ 9 + ├─╼ 12 + └─╼ 13 + """ + ).strip() + assert ret == target + + ret = nx.forest_str(forest, sources=[0, 14, 7], ascii_only=True) + print(ret) + + target = dedent( + """ + +-- 0 + : |-> 1 + : | |-> 3 + : | L-> 4 + : L-> 2 + : |-> 5 + : L-> 6 + +-- 14 + : |-> 15 + : | |-> 17 + : | L-> 18 + : L-> 16 + : |-> 19 + : L-> 20 + +-- 7 + |-> 8 + | |-> 10 + | L-> 11 + L-> 9 + |-> 12 + L-> 13 + """ + ).strip() + assert ret == target + + +def test_forest_str_undirected_multi_tree(): + tree1 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + tree2 = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + tree2 = nx.relabel_nodes(tree2, {n: n + len(tree1) for n in tree2.nodes}) + forest = nx.union(tree1, tree2) + ret = nx.forest_str(forest, sources=[0, 7]) + print(ret) + + target = dedent( + """ + ╟── 0 + ╎ ├── 1 + ╎ │ ├── 3 + ╎ │ └── 4 + ╎ └── 2 + ╎ ├── 5 + ╎ └── 6 + ╙── 7 + ├── 8 + │ ├── 10 + │ └── 11 + └── 9 + ├── 12 + └── 13 + """ + ).strip() + assert ret == target + + ret = nx.forest_str(forest, sources=[0, 7], ascii_only=True) + print(ret) + + target = dedent( + """ + +-- 0 + : |-- 1 + : | |-- 3 + : | L-- 4 + : L-- 2 + : |-- 5 + : L-- 6 + +-- 7 + |-- 8 + | |-- 10 + | L-- 11 + L-- 9 + |-- 12 + L-- 13 + """ + ).strip() + assert ret == target + + +def test_forest_str_undirected(): + # Create a directed forest + graph = nx.balanced_tree(r=2, h=2, create_using=nx.Graph) + + # arbitrary starting point + nx.forest_str(graph) + + node_target0 = dedent( + """ + ╙── 0 + ├── 1 + │ ├── 3 + │ └── 4 + └── 2 + ├── 5 + └── 6 + """ + ).strip() + + # defined starting point + ret = nx.forest_str(graph, sources=[0]) + print(ret) + assert ret == node_target0 + + # defined starting point + node_target2 = 
dedent( + """ + ╙── 2 + ├── 0 + │ └── 1 + │ ├── 3 + │ └── 4 + ├── 5 + └── 6 + """ + ).strip() + ret = nx.forest_str(graph, sources=[2]) + print(ret) + assert ret == node_target2 + + +def test_forest_str_errors(): + ugraph = nx.complete_graph(3, create_using=nx.Graph) + + with pytest.raises(nx.NetworkXNotImplemented): + nx.forest_str(ugraph) + + dgraph = nx.complete_graph(3, create_using=nx.DiGraph) + + with pytest.raises(nx.NetworkXNotImplemented): + nx.forest_str(dgraph) + + +def test_forest_str_overspecified_sources(): + """ + When sources are directly specified, we won't be able to determine when we + are in the last component, so there will always be a trailing, leftmost + pipe. + """ + graph = nx.disjoint_union_all( + [ + nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph), + nx.balanced_tree(r=1, h=2, create_using=nx.DiGraph), + nx.balanced_tree(r=2, h=1, create_using=nx.DiGraph), + ] + ) + + # defined starting point + target1 = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ └─╼ 2 + ╟── 3 + ╎ └─╼ 4 + ╎ └─╼ 5 + ╟── 6 + ╎ ├─╼ 7 + ╎ └─╼ 8 + """ + ).strip() + + target2 = dedent( + """ + ╟── 0 + ╎ ├─╼ 1 + ╎ └─╼ 2 + ╟── 3 + ╎ └─╼ 4 + ╎ └─╼ 5 + ╙── 6 + ├─╼ 7 + └─╼ 8 + """ + ).strip() + + lines = [] + nx.forest_str(graph, write=lines.append, sources=graph.nodes) + got1 = "\n".join(lines) + print("got1: ") + print(got1) + + lines = [] + nx.forest_str(graph, write=lines.append) + got2 = "\n".join(lines) + print("got2: ") + print(got2) + + assert got1 == target1 + assert got2 == target2 + + +def test_write_network_text_iterative_add_directed_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in product(graph.nodes, graph.nodes): + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + # defined starting point + target = dedent( + """ + --- initial state --- + ╟── 1 + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 1) --- + ╟── 1 ╾ 1 + ╎ └─╼ ... + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 2) --- + ╟── 1 ╾ 1 + ╎ ├─╼ 2 + ╎ └─╼ ... + ╟── 3 + ╙── 4 + --- add_edge(1, 3) --- + ╟── 1 ╾ 1 + ╎ ├─╼ 2 + ╎ ├─╼ 3 + ╎ └─╼ ... + ╙── 4 + --- add_edge(1, 4) --- + ╙── 1 ╾ 1 + ├─╼ 2 + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 1) --- + ╙── 2 ╾ 1 + └─╼ 1 ╾ 1 + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 2) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ └─╼ ... + ├─╼ 3 + ├─╼ 4 + └─╼ ... + --- add_edge(2, 3) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ ├─╼ 3 ╾ 1 + │ └─╼ ... + ├─╼ 4 + └─╼ ... + --- add_edge(2, 4) --- + ╙── 1 ╾ 1, 2 + ├─╼ 2 ╾ 2 + │ ├─╼ 3 ╾ 1 + │ ├─╼ 4 ╾ 1 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 1) --- + ╙── 2 ╾ 1, 2 + ├─╼ 1 ╾ 1, 3 + │ ├─╼ 3 ╾ 2 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 2 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 2) --- + ╙── 3 ╾ 1, 2 + ├─╼ 1 ╾ 1, 2 + │ ├─╼ 2 ╾ 2, 3 + │ │ ├─╼ 4 ╾ 1 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(3, 3) --- + ╙── 1 ╾ 1, 2, 3 + ├─╼ 2 ╾ 2, 3 + │ ├─╼ 3 ╾ 1, 3 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 1 + │ └─╼ ... + └─╼ ... + --- add_edge(3, 4) --- + ╙── 1 ╾ 1, 2, 3 + ├─╼ 2 ╾ 2, 3 + │ ├─╼ 3 ╾ 1, 3 + │ │ ├─╼ 4 ╾ 1, 2 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 1) --- + ╙── 2 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 3, 4 + │ ├─╼ 3 ╾ 2, 3 + │ │ ├─╼ 4 ╾ 1, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ --- add_edge(4, 2) --- + ╙── 3 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 2, 4 + │ ├─╼ 2 ╾ 2, 3, 4 + │ │ ├─╼ 4 ╾ 1, 3 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 3) --- + ╙── 4 ╾ 1, 2, 3 + ├─╼ 1 ╾ 1, 2, 3 + │ ├─╼ 2 ╾ 2, 3, 4 + │ │ ├─╼ 3 ╾ 1, 3, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 4) --- + ╙── 1 ╾ 1, 2, 3, 4 + ├─╼ 2 ╾ 2, 3, 4 + │ ├─╼ 3 ╾ 1, 3, 4 + │ │ ├─╼ 4 ╾ 1, 2, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_iterative_add_undirected_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in product(graph.nodes, graph.nodes): + if i == j: + continue + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- initial state --- + ╟── 1 + ╟── 2 + ╟── 3 + ╙── 4 + --- add_edge(1, 2) --- + ╟── 3 + ╟── 4 + ╙── 1 + └── 2 + --- add_edge(1, 3) --- + ╟── 4 + ╙── 2 + └── 1 + └── 3 + --- add_edge(1, 4) --- + ╙── 2 + └── 1 + ├── 3 + └── 4 + --- add_edge(2, 1) --- + ╙── 2 + └── 1 + ├── 3 + └── 4 + --- add_edge(2, 3) --- + ╙── 4 + └── 1 + ├── 2 + │ └── 3 ─ 1 + └── ... + --- add_edge(2, 4) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 1) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 2) --- + ╙── 3 + ├── 1 + │ ├── 2 ─ 3 + │ │ └── 4 ─ 1 + │ └── ... + └── ... + --- add_edge(3, 4) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 1) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 2) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + --- add_edge(4, 3) --- + ╙── 1 + ├── 2 + │ ├── 3 ─ 1 + │ │ └── 4 ─ 1, 2 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_iterative_add_random_directed_edges(): + """ + Walk through the cases going from a disconnected to fully connected graph + """ + + rng = random.Random(724466096) + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + possible_edges = list(product(graph.nodes, graph.nodes)) + rng.shuffle(possible_edges) + graph.add_edges_from(possible_edges[0:8]) + lines = [] + write = lines.append + write("--- initial state ---") + nx.write_network_text(graph, path=write, end="") + for i, j in possible_edges[8:12]: + write(f"--- add_edge({i}, {j}) ---") + graph.add_edge(i, j) + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- initial state --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(4, 1) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(2, 1) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- add_edge(5, 2) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2, 5 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ --- add_edge(1, 5) --- + ╙── 3 ╾ 5 + └─╼ 2 ╾ 2, 5 + ├─╼ 4 ╾ 4 + │ ├─╼ 5 ╾ 1 + │ │ ├─╼ 1 ╾ 1, 4, 2 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + + """ + ).strip() + assert target == text + + +def test_write_network_text_nearly_forest(): + g = nx.DiGraph() + g.add_edge(1, 2) + g.add_edge(1, 5) + g.add_edge(2, 3) + g.add_edge(3, 4) + g.add_edge(5, 6) + g.add_edge(6, 7) + g.add_edge(6, 8) + orig = g.copy() + g.add_edge(1, 8) # forward edge + g.add_edge(4, 2) # back edge + g.add_edge(6, 3) # cross edge + lines = [] + write = lines.append + write("--- directed case ---") + nx.write_network_text(orig, path=write, end="") + write("--- add (1, 8), (4, 2), (6, 3) ---") + nx.write_network_text(g, path=write, end="") + write("--- undirected case ---") + nx.write_network_text(orig.to_undirected(), path=write, sources=[1], end="") + write("--- add (1, 8), (4, 2), (6, 3) ---") + nx.write_network_text(g.to_undirected(), path=write, sources=[1], end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case --- + ╙── 1 + ├─╼ 2 + │ └─╼ 3 + │ └─╼ 4 + └─╼ 5 + └─╼ 6 + ├─╼ 7 + └─╼ 8 + --- add (1, 8), (4, 2), (6, 3) --- + ╙── 1 + ├─╼ 2 ╾ 4 + │ └─╼ 3 ╾ 6 + │ └─╼ 4 + │ └─╼ ... + ├─╼ 5 + │ └─╼ 6 + │ ├─╼ 7 + │ ├─╼ 8 ╾ 1 + │ └─╼ ... + └─╼ ... + --- undirected case --- + ╙── 1 + ├── 2 + │ └── 3 + │ └── 4 + └── 5 + └── 6 + ├── 7 + └── 8 + --- add (1, 8), (4, 2), (6, 3) --- + ╙── 1 + ├── 2 + │ ├── 3 + │ │ ├── 4 ─ 2 + │ │ └── 6 + │ │ ├── 5 ─ 1 + │ │ ├── 7 + │ │ └── 8 ─ 1 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_complete_graph_ascii_only(): + graph = nx.generators.complete_graph(5, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case ---") + nx.write_network_text(graph, path=write, ascii_only=True, end="") + write("--- undirected case ---") + nx.write_network_text(graph.to_undirected(), path=write, ascii_only=True, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case --- + +-- 0 <- 1, 2, 3, 4 + |-> 1 <- 2, 3, 4 + | |-> 2 <- 0, 3, 4 + | | |-> 3 <- 0, 1, 4 + | | | |-> 4 <- 0, 1, 2 + | | | | L-> ... + | | | L-> ... + | | L-> ... + | L-> ... + L-> ... + --- undirected case --- + +-- 0 + |-- 1 + | |-- 2 - 0 + | | |-- 3 - 0, 1 + | | | L-- 4 - 0, 1, 2 + | | L-- ... + | L-- ... + L-- ... + """ + ).strip() + assert target == text + + +def test_write_network_text_with_labels(): + graph = nx.generators.complete_graph(5, create_using=nx.DiGraph) + for n in graph.nodes: + graph.nodes[n]["label"] = f"Node(n={n})" + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, with_labels=True, ascii_only=False, end="") + text = "\n".join(lines) + print(text) + # Non trees with labels can get somewhat out of hand with network text + # because we need to immediately show every non-tree edge to the right + target = dedent( + """ + ╙── Node(n=0) ╾ Node(n=1), Node(n=2), Node(n=3), Node(n=4) + ├─╼ Node(n=1) ╾ Node(n=2), Node(n=3), Node(n=4) + │ ├─╼ Node(n=2) ╾ Node(n=0), Node(n=3), Node(n=4) + │ │ ├─╼ Node(n=3) ╾ Node(n=0), Node(n=1), Node(n=4) + │ │ │ ├─╼ Node(n=4) ╾ Node(n=0), Node(n=1), Node(n=2) + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_complete_graphs(): + lines = [] + write = lines.append + for k in [0, 1, 2, 3, 4, 5]: + g = nx.generators.complete_graph(k) + write(f"--- undirected k={k} ---") + nx.write_network_text(g, path=write, end="") + + for k in [0, 1, 2, 3, 4, 5]: + g = nx.generators.complete_graph(k, nx.DiGraph) + write(f"--- directed k={k} ---") + nx.write_network_text(g, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- undirected k=0 --- + ╙ + --- undirected k=1 --- + ╙── 0 + --- undirected k=2 --- + ╙── 0 + └── 1 + --- undirected k=3 --- + ╙── 0 + ├── 1 + │ └── 2 ─ 0 + └── ... + --- undirected k=4 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ └── 3 ─ 0, 1 + │ └── ... + └── ... + --- undirected k=5 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + --- directed k=0 --- + ╙ + --- directed k=1 --- + ╙── 0 + --- directed k=2 --- + ╙── 0 ╾ 1 + └─╼ 1 + └─╼ ... + --- directed k=3 --- + ╙── 0 ╾ 1, 2 + ├─╼ 1 ╾ 2 + │ ├─╼ 2 ╾ 0 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed k=4 --- + ╙── 0 ╾ 1, 2, 3 + ├─╼ 1 ╾ 2, 3 + │ ├─╼ 2 ╾ 0, 3 + │ │ ├─╼ 3 ╾ 0, 1 + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed k=5 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ ├─╼ 3 ╾ 0, 1, 4 + │ │ │ ├─╼ 4 ╾ 0, 1, 2 + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_multiple_sources(): + g = nx.DiGraph() + g.add_edge(1, 2) + g.add_edge(1, 3) + g.add_edge(2, 4) + g.add_edge(3, 5) + g.add_edge(3, 6) + g.add_edge(5, 4) + g.add_edge(4, 1) + g.add_edge(1, 5) + lines = [] + write = lines.append + # Use each node as the starting point to demonstrate how the representation + # changes. + nodes = sorted(g.nodes()) + for n in nodes: + write(f"--- source node: {n} ---") + nx.write_network_text(g, path=write, sources=[n], end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- source node: 1 --- + ╙── 1 ╾ 4 + ├─╼ 2 + │ └─╼ 4 ╾ 5 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 2 --- + ╙── 2 ╾ 1 + └─╼ 4 ╾ 5 + └─╼ 1 + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 3 --- + ╙── 3 ╾ 1 + ├─╼ 5 ╾ 1 + │ └─╼ 4 ╾ 2 + │ └─╼ 1 + │ ├─╼ 2 + │ │ └─╼ ... + │ └─╼ ... + └─╼ 6 + --- source node: 4 --- + ╙── 4 ╾ 2, 5 + └─╼ 1 + ├─╼ 2 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 5 ╾ 1 + │ │ └─╼ ... + │ └─╼ 6 + └─╼ ... + --- source node: 5 --- + ╙── 5 ╾ 3, 1 + └─╼ 4 ╾ 2 + └─╼ 1 + ├─╼ 2 + │ └─╼ ... + ├─╼ 3 + │ ├─╼ 6 + │ └─╼ ... + └─╼ ... 
+ --- source node: 6 --- + ╙── 6 ╾ 3 + """ + ).strip() + assert target == text + + +def test_write_network_text_star_graph(): + graph = nx.star_graph(5, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 1 + └── 0 + ├── 2 + ├── 3 + ├── 4 + └── 5 + """ + ).strip() + assert target == text + + +def test_write_network_text_path_graph(): + graph = nx.path_graph(3, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 0 + └── 1 + └── 2 + """ + ).strip() + assert target == text + + +def test_write_network_text_lollipop_graph(): + graph = nx.lollipop_graph(4, 2, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 5 + └── 4 + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ └── 2 ─ 0, 3 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_wheel_graph(): + graph = nx.wheel_graph(7, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 1 + ├── 0 + │ ├── 2 ─ 1 + │ │ └── 3 ─ 0 + │ │ └── 4 ─ 0 + │ │ └── 5 ─ 0 + │ │ └── 6 ─ 0, 1 + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_circular_ladder_graph(): + graph = nx.circular_ladder_graph(4, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 0 + ├── 1 + │ ├── 2 + │ │ ├── 3 ─ 0 + │ │ │ └── 7 + │ │ │ ├── 6 ─ 2 + │ │ │ │ └── 5 ─ 1 + │ │ │ │ └── 4 ─ 0, 7 + │ │ │ └── ... + │ │ └── ... + │ └── ... + └── ... + """ + ).strip() + assert target == text + + +def test_write_network_text_dorogovtsev_goltsev_mendes_graph(): + graph = nx.dorogovtsev_goltsev_mendes_graph(4, create_using=nx.Graph) + lines = [] + write = lines.append + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + ╙── 15 + ├── 0 + │ ├── 1 ─ 15 + │ │ ├── 2 ─ 0 + │ │ │ ├── 4 ─ 0 + │ │ │ │ ├── 9 ─ 0 + │ │ │ │ │ ├── 22 ─ 0 + │ │ │ │ │ └── 38 ─ 4 + │ │ │ │ ├── 13 ─ 2 + │ │ │ │ │ ├── 34 ─ 2 + │ │ │ │ │ └── 39 ─ 4 + │ │ │ │ ├── 18 ─ 0 + │ │ │ │ ├── 30 ─ 2 + │ │ │ │ └── ... + │ │ │ ├── 5 ─ 1 + │ │ │ │ ├── 12 ─ 1 + │ │ │ │ │ ├── 29 ─ 1 + │ │ │ │ │ └── 40 ─ 5 + │ │ │ │ ├── 14 ─ 2 + │ │ │ │ │ ├── 35 ─ 2 + │ │ │ │ │ └── 41 ─ 5 + │ │ │ │ ├── 25 ─ 1 + │ │ │ │ ├── 31 ─ 2 + │ │ │ │ └── ... + │ │ │ ├── 7 ─ 0 + │ │ │ │ ├── 20 ─ 0 + │ │ │ │ └── 32 ─ 2 + │ │ │ ├── 10 ─ 1 + │ │ │ │ ├── 27 ─ 1 + │ │ │ │ └── 33 ─ 2 + │ │ │ ├── 16 ─ 0 + │ │ │ ├── 23 ─ 1 + │ │ │ └── ... + │ │ ├── 3 ─ 0 + │ │ │ ├── 8 ─ 0 + │ │ │ │ ├── 21 ─ 0 + │ │ │ │ └── 36 ─ 3 + │ │ │ ├── 11 ─ 1 + │ │ │ │ ├── 28 ─ 1 + │ │ │ │ └── 37 ─ 3 + │ │ │ ├── 17 ─ 0 + │ │ │ ├── 24 ─ 1 + │ │ │ └── ... + │ │ ├── 6 ─ 0 + │ │ │ ├── 19 ─ 0 + │ │ │ └── 26 ─ 1 + │ │ └── ... + │ └── ... + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_tree_max_depth(): + orig = nx.balanced_tree(r=1, h=3, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- directed case, max_depth=4 ---") + nx.write_network_text(orig, path=write, end="", max_depth=4) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + write("--- undirected case, max_depth=4 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=4) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╙── 0 + └─╼ ... + --- directed case, max_depth=2 --- + ╙── 0 + └─╼ 1 + └─╼ ... + --- directed case, max_depth=3 --- + ╙── 0 + └─╼ 1 + └─╼ 2 + └─╼ ... + --- directed case, max_depth=4 --- + ╙── 0 + └─╼ 1 + └─╼ 2 + └─╼ 3 + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╙── 0 ─ 1 + └── ... + --- undirected case, max_depth=2 --- + ╙── 0 + └── 1 ─ 2 + └── ... + --- undirected case, max_depth=3 --- + ╙── 0 + └── 1 + └── 2 ─ 3 + └── ... 
+ --- undirected case, max_depth=4 --- + ╙── 0 + └── 1 + └── 2 + └── 3 + """ + ).strip() + assert target == text + + +def test_write_network_text_graph_max_depth(): + orig = nx.erdos_renyi_graph(10, 0.15, directed=True, seed=40392) + lines = [] + write = lines.append + write("--- directed case, max_depth=None ---") + nx.write_network_text(orig, path=write, end="", max_depth=None) + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- undirected case, max_depth=None ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case, max_depth=None --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ 3 + ╎ │ ├─╼ 1 ╾ 9 + ╎ │ │ └─╼ 9 ╾ 6 + ╎ │ │ ├─╼ 6 + ╎ │ │ │ └─╼ ... + ╎ │ │ ├─╼ 7 ╾ 4 + ╎ │ │ │ ├─╼ 2 + ╎ │ │ │ └─╼ ... + ╎ │ │ └─╼ ... + ╎ │ └─╼ ... + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╟── 4 + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=2 --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ ... + ╎ └─╼ 7 ╾ 9 + ╎ └─╼ ... + ╙── 8 + --- directed case, max_depth=3 --- + ╟── 4 + ╎ ├─╼ 0 ╾ 3 + ╎ ├─╼ 5 ╾ 7 + ╎ │ └─╼ 3 + ╎ │ └─╼ ... + ╎ └─╼ 7 ╾ 9 + ╎ ├─╼ 2 + ╎ └─╼ ... + ╙── 8 + --- undirected case, max_depth=None --- + ╟── 8 + ╙── 2 + └── 7 + ├── 4 + │ ├── 0 + │ │ └── 3 + │ │ ├── 1 + │ │ │ └── 9 ─ 7 + │ │ │ └── 6 + │ │ └── 5 ─ 4, 7 + │ └── ... + └── ... + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╟── 8 + ╙── 2 ─ 7 + └── ... + --- undirected case, max_depth=2 --- + ╟── 8 + ╙── 2 + └── 7 ─ 4, 5, 9 + └── ... + --- undirected case, max_depth=3 --- + ╟── 8 + ╙── 2 + └── 7 + ├── 4 ─ 0, 5 + │ └── ... + ├── 5 ─ 4, 3 + │ └── ... + └── 9 ─ 1, 6 + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_clique_max_depth(): + orig = nx.complete_graph(5, nx.DiGraph) + lines = [] + write = lines.append + write("--- directed case, max_depth=None ---") + nx.write_network_text(orig, path=write, end="", max_depth=None) + write("--- directed case, max_depth=0 ---") + nx.write_network_text(orig, path=write, end="", max_depth=0) + write("--- directed case, max_depth=1 ---") + nx.write_network_text(orig, path=write, end="", max_depth=1) + write("--- directed case, max_depth=2 ---") + nx.write_network_text(orig, path=write, end="", max_depth=2) + write("--- directed case, max_depth=3 ---") + nx.write_network_text(orig, path=write, end="", max_depth=3) + write("--- undirected case, max_depth=None ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=None) + write("--- undirected case, max_depth=0 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=0) + write("--- undirected case, max_depth=1 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=1) + write("--- undirected case, max_depth=2 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=2) + write("--- undirected case, max_depth=3 ---") + nx.write_network_text(orig.to_undirected(), path=write, end="", max_depth=3) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- directed case, max_depth=None --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ ├─╼ 3 ╾ 0, 1, 4 + │ │ │ ├─╼ 4 ╾ 0, 1, 2 + │ │ │ │ └─╼ ... + │ │ │ └─╼ ... + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- directed case, max_depth=0 --- + ╙ ... + --- directed case, max_depth=1 --- + ╙── 0 ╾ 1, 2, 3, 4 + └─╼ ... + --- directed case, max_depth=2 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ └─╼ ... + ├─╼ 2 ╾ 1, 3, 4 + │ └─╼ ... + ├─╼ 3 ╾ 1, 2, 4 + │ └─╼ ... + └─╼ 4 ╾ 1, 2, 3 + └─╼ ... + --- directed case, max_depth=3 --- + ╙── 0 ╾ 1, 2, 3, 4 + ├─╼ 1 ╾ 2, 3, 4 + │ ├─╼ 2 ╾ 0, 3, 4 + │ │ └─╼ ... + │ ├─╼ 3 ╾ 0, 2, 4 + │ │ └─╼ ... + │ ├─╼ 4 ╾ 0, 2, 3 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... + --- undirected case, max_depth=None --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + --- undirected case, max_depth=0 --- + ╙ ... + --- undirected case, max_depth=1 --- + ╙── 0 ─ 1, 2, 3, 4 + └── ... + --- undirected case, max_depth=2 --- + ╙── 0 + ├── 1 ─ 2, 3, 4 + │ └── ... + ├── 2 ─ 1, 3, 4 + │ └── ... + ├── 3 ─ 1, 2, 4 + │ └── ... + └── 4 ─ 1, 2, 3 + --- undirected case, max_depth=3 --- + ╙── 0 + ├── 1 + │ ├── 2 ─ 0, 3, 4 + │ │ └── ... + │ ├── 3 ─ 0, 2, 4 + │ │ └── ... + │ └── 4 ─ 0, 2, 3 + └── ... 
+ """ + ).strip() + assert target == text + + +def test_write_network_text_custom_label(): + # Create a directed forest with labels + graph = nx.erdos_renyi_graph(5, 0.4, directed=True, seed=359222358) + for node in graph.nodes: + graph.nodes[node]["label"] = f"Node({node})" + graph.nodes[node]["chr"] = chr(node + ord("a") - 1) + if node % 2 == 0: + graph.nodes[node]["part"] = chr(node + ord("a")) + + lines = [] + write = lines.append + write("--- when with_labels=True, uses the 'label' attr ---") + nx.write_network_text(graph, path=write, with_labels=True, end="", max_depth=None) + write("--- when with_labels=False, uses str(node) value ---") + nx.write_network_text(graph, path=write, with_labels=False, end="", max_depth=None) + write("--- when with_labels is a string, use that attr ---") + nx.write_network_text(graph, path=write, with_labels="chr", end="", max_depth=None) + write("--- fallback to str(node) when the attr does not exist ---") + nx.write_network_text(graph, path=write, with_labels="part", end="", max_depth=None) + + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- when with_labels=True, uses the 'label' attr --- + ╙── Node(1) + └─╼ Node(3) ╾ Node(2) + ├─╼ Node(0) + │ ├─╼ Node(2) ╾ Node(3), Node(4) + │ │ └─╼ ... + │ └─╼ Node(4) + │ └─╼ ... + └─╼ ... + --- when with_labels=False, uses str(node) value --- + ╙── 1 + └─╼ 3 ╾ 2 + ├─╼ 0 + │ ├─╼ 2 ╾ 3, 4 + │ │ └─╼ ... + │ └─╼ 4 + │ └─╼ ... + └─╼ ... + --- when with_labels is a string, use that attr --- + ╙── a + └─╼ c ╾ b + ├─╼ ` + │ ├─╼ b ╾ c, d + │ │ └─╼ ... + │ └─╼ d + │ └─╼ ... + └─╼ ... + --- fallback to str(node) when the attr does not exist --- + ╙── 1 + └─╼ 3 ╾ c + ├─╼ a + │ ├─╼ c ╾ 3, e + │ │ └─╼ ... + │ └─╼ e + │ └─╼ ... + └─╼ ... + """ + ).strip() + assert target == text + + +def test_write_network_text_vertical_chains(): + graph1 = nx.lollipop_graph(4, 2, create_using=nx.Graph) + graph1.add_edge(0, -1) + graph1.add_edge(-1, -2) + graph1.add_edge(-2, -3) + + graph2 = graph1.to_directed() + graph2.remove_edges_from([(u, v) for u, v in graph2.edges if v > u]) + + lines = [] + write = lines.append + write("--- Undirected UTF ---") + nx.write_network_text(graph1, path=write, end="", vertical_chains=True) + write("--- Undirected ASCI ---") + nx.write_network_text( + graph1, path=write, end="", vertical_chains=True, ascii_only=True + ) + write("--- Directed UTF ---") + nx.write_network_text(graph2, path=write, end="", vertical_chains=True) + write("--- Directed ASCI ---") + nx.write_network_text( + graph2, path=write, end="", vertical_chains=True, ascii_only=True + ) + + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- Undirected UTF --- + ╙── 5 + │ + 4 + │ + 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ │ + │ │ 2 ─ 0, 3 + │ ├── -1 + │ │ │ + │ │ -2 + │ │ │ + │ │ -3 + │ └── ... + └── ... + --- Undirected ASCI --- + +-- 5 + | + 4 + | + 3 + |-- 0 + | |-- 1 - 3 + | | | + | | 2 - 0, 3 + | |-- -1 + | | | + | | -2 + | | | + | | -3 + | L-- ... + L-- ... + --- Directed UTF --- + ╙── 5 + ╽ + 4 + ╽ + 3 + ├─╼ 0 ╾ 1, 2 + │ ╽ + │ -1 + │ ╽ + │ -2 + │ ╽ + │ -3 + ├─╼ 1 ╾ 2 + │ └─╼ ... + └─╼ 2 + └─╼ ... + --- Directed ASCI --- + +-- 5 + ! + 4 + ! + 3 + |-> 0 <- 1, 2 + | ! + | -1 + | ! + | -2 + | ! + | -3 + |-> 1 <- 2 + | L-> ... + L-> 2 + L-> ... 
+ """ + ).strip() + assert target == text + + +def test_collapse_directed(): + graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph) + lines = [] + write = lines.append + write("--- Original ---") + nx.write_network_text(graph, path=write, end="") + graph.nodes[1]["collapse"] = True + write("--- Collapse Node 1 ---") + nx.write_network_text(graph, path=write, end="") + write("--- Add alternate path (5, 3) to collapsed zone") + graph.add_edge(5, 3) + nx.write_network_text(graph, path=write, end="") + write("--- Collapse Node 0 ---") + graph.nodes[0]["collapse"] = True + nx.write_network_text(graph, path=write, end="") + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- Original --- + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ │ ├─╼ 7 + │ │ └─╼ 8 + │ └─╼ 4 + │ ├─╼ 9 + │ └─╼ 10 + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Collapse Node 1 --- + ╙── 0 + ├─╼ 1 + │ └─╼ ... + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Add alternate path (5, 3) to collapsed zone + ╙── 0 + ├─╼ 1 + │ └─╼ ... + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ ├─╼ 12 + │ └─╼ 3 ╾ 1 + │ ├─╼ 7 + │ └─╼ 8 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + --- Collapse Node 0 --- + ╙── 0 + └─╼ ... + """ + ).strip() + assert target == text + + +def test_collapse_undirected(): + graph = nx.balanced_tree(r=2, h=3, create_using=nx.Graph) + lines = [] + write = lines.append + write("--- Original ---") + nx.write_network_text(graph, path=write, end="", sources=[0]) + graph.nodes[1]["collapse"] = True + write("--- Collapse Node 1 ---") + nx.write_network_text(graph, path=write, end="", sources=[0]) + write("--- Add alternate path (5, 3) to collapsed zone") + graph.add_edge(5, 3) + nx.write_network_text(graph, path=write, end="", sources=[0]) + write("--- Collapse Node 0 ---") + graph.nodes[0]["collapse"] = True + nx.write_network_text(graph, path=write, end="", sources=[0]) + text = "\n".join(lines) + print(text) + target = dedent( + """ + --- Original --- + ╙── 0 + ├── 1 + │ ├── 3 + │ │ ├── 7 + │ │ └── 8 + │ └── 4 + │ ├── 9 + │ └── 10 + └── 2 + ├── 5 + │ ├── 11 + │ └── 12 + └── 6 + ├── 13 + └── 14 + --- Collapse Node 1 --- + ╙── 0 + ├── 1 ─ 3, 4 + │ └── ... + └── 2 + ├── 5 + │ ├── 11 + │ └── 12 + └── 6 + ├── 13 + └── 14 + --- Add alternate path (5, 3) to collapsed zone + ╙── 0 + ├── 1 ─ 3, 4 + │ └── ... + └── 2 + ├── 5 + │ ├── 11 + │ ├── 12 + │ └── 3 ─ 1 + │ ├── 7 + │ └── 8 + └── 6 + ├── 13 + └── 14 + --- Collapse Node 0 --- + ╙── 0 ─ 1, 2 + └── ... 
+ """ + ).strip() + assert target == text + + +def generate_test_graphs(): + """ + Generate a gauntlet of different test graphs with different properties + """ + import random + + rng = random.Random(976689776) + num_randomized = 3 + + for directed in [0, 1]: + cls = nx.DiGraph if directed else nx.Graph + + for num_nodes in range(17): + # Disconnected graph + graph = cls() + graph.add_nodes_from(range(num_nodes)) + yield graph + + # Randomize graphs + if num_nodes > 0: + for p in [0.1, 0.3, 0.5, 0.7, 0.9]: + for seed in range(num_randomized): + graph = nx.erdos_renyi_graph( + num_nodes, p, directed=directed, seed=rng + ) + yield graph + + yield nx.complete_graph(num_nodes, cls) + + yield nx.path_graph(3, create_using=cls) + yield nx.balanced_tree(r=1, h=3, create_using=cls) + if not directed: + yield nx.circular_ladder_graph(4, create_using=cls) + yield nx.star_graph(5, create_using=cls) + yield nx.lollipop_graph(4, 2, create_using=cls) + yield nx.wheel_graph(7, create_using=cls) + yield nx.dorogovtsev_goltsev_mendes_graph(4, create_using=cls) + + +@pytest.mark.parametrize( + ("vertical_chains", "ascii_only"), + tuple( + [ + (vertical_chains, ascii_only) + for vertical_chains in [0, 1] + for ascii_only in [0, 1] + ] + ), +) +def test_network_text_round_trip(vertical_chains, ascii_only): + """ + Write the graph to network text format, then parse it back in, assert it is + the same as the original graph. Passing this test is strong validation of + both the format generator and parser. + """ + from networkx.readwrite.text import _parse_network_text + + for graph in generate_test_graphs(): + graph = nx.relabel_nodes(graph, {n: str(n) for n in graph.nodes}) + lines = list( + nx.generate_network_text( + graph, vertical_chains=vertical_chains, ascii_only=ascii_only + ) + ) + new = _parse_network_text(lines) + try: + assert new.nodes == graph.nodes + assert new.edges == graph.edges + except Exception: + print("ERROR in round trip with graph") + nx.write_network_text(graph) + raise diff --git a/phivenv/Lib/site-packages/networkx/readwrite/text.py b/phivenv/Lib/site-packages/networkx/readwrite/text.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5606340c8d96ff5839d9f1247c23233f411306 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/readwrite/text.py @@ -0,0 +1,950 @@ +""" +Text-based visual representations of graphs +""" +import sys +import warnings +from collections import defaultdict + +import networkx as nx +from networkx.utils import open_file + +__all__ = ["forest_str", "generate_network_text", "write_network_text"] + + +class BaseGlyphs: + @classmethod + def as_dict(cls): + return { + a: getattr(cls, a) + for a in dir(cls) + if not a.startswith("_") and a != "as_dict" + } + + +class AsciiBaseGlyphs(BaseGlyphs): + empty: str = "+" + newtree_last: str = "+-- " + newtree_mid: str = "+-- " + endof_forest: str = " " + within_forest: str = ": " + within_tree: str = "| " + + +class AsciiDirectedGlyphs(AsciiBaseGlyphs): + last: str = "L-> " + mid: str = "|-> " + backedge: str = "<-" + vertical_edge: str = "!" 
+ + +class AsciiUndirectedGlyphs(AsciiBaseGlyphs): + last: str = "L-- " + mid: str = "|-- " + backedge: str = "-" + vertical_edge: str = "|" + + +class UtfBaseGlyphs(BaseGlyphs): + # Notes on available box and arrow characters + # https://en.wikipedia.org/wiki/Box-drawing_character + # https://stackoverflow.com/questions/2701192/triangle-arrow + empty: str = "╙" + newtree_last: str = "╙── " + newtree_mid: str = "╟── " + endof_forest: str = " " + within_forest: str = "╎ " + within_tree: str = "│ " + + +class UtfDirectedGlyphs(UtfBaseGlyphs): + last: str = "└─╼ " + mid: str = "├─╼ " + backedge: str = "╾" + vertical_edge: str = "╽" + + +class UtfUndirectedGlyphs(UtfBaseGlyphs): + last: str = "└── " + mid: str = "├── " + backedge: str = "─" + vertical_edge: str = "│" + + +def generate_network_text( + graph, + with_labels=True, + sources=None, + max_depth=None, + ascii_only=False, + vertical_chains=False, +): + """Generate lines in the "network text" format + + This works via a depth-first traversal of the graph and writing a line for + each unique node encountered. Non-tree edges are written to the right of + each node, and connection to a non-tree edge is indicated with an ellipsis. + This representation works best when the input graph is a forest, but any + graph can be represented. + + This notation is original to networkx, although it is simple enough that it + may be known in existing literature. See #5602 for details. The procedure + is summarized as follows: + + 1. Given a set of source nodes (which can be specified, or automatically + discovered via finding the (strongly) connected components and choosing one + node with minimum degree from each), we traverse the graph in depth first + order. + + 2. Each reachable node will be printed exactly once on it's own line. + + 3. Edges are indicated in one of four ways: + + a. a parent "L-style" connection on the upper left. This corresponds to + a traversal in the directed DFS tree. + + b. a backref "<-style" connection shown directly on the right. For + directed graphs, these are drawn for any incoming edges to a node that + is not a parent edge. For undirected graphs, these are drawn for only + the non-parent edges that have already been represented (The edges that + have not been represented will be handled in the recursive case). + + c. a child "L-style" connection on the lower right. Drawing of the + children are handled recursively. + + d. if ``vertical_chains`` is true, and a parent node only has one child + a "vertical-style" edge is drawn between them. + + 4. The children of each node (wrt the directed DFS tree) are drawn + underneath and to the right of it. In the case that a child node has already + been drawn the connection is replaced with an ellipsis ("...") to indicate + that there is one or more connections represented elsewhere. + + 5. If a maximum depth is specified, an edge to nodes past this maximum + depth will be represented by an ellipsis. + + 6. If a a node has a truthy "collapse" value, then we do not traverse past + that node. + + Parameters + ---------- + graph : nx.DiGraph | nx.Graph + Graph to represent + + with_labels : bool | str + If True will use the "label" attribute of a node to display if it + exists otherwise it will use the node value itself. If given as a + string, then that attribute name will be used instead of "label". + Defaults to True. + + sources : List + Specifies which nodes to start traversal from. Note: nodes that are not + reachable from one of these sources may not be shown. 
If unspecified, + the minimal set of nodes needed to reach all others will be used. + + max_depth : int | None + The maximum depth to traverse before stopping. Defaults to None. + + ascii_only : Boolean + If True only ASCII characters are used to construct the visualization + + vertical_chains : Boolean + If True, chains of nodes will be drawn vertically when possible. + + Yields + ------ + str : a line of generated text + + Examples + -------- + >>> graph = nx.path_graph(10) + >>> graph.add_node('A') + >>> graph.add_node('B') + >>> graph.add_node('C') + >>> graph.add_node('D') + >>> graph.add_edge(9, 'A') + >>> graph.add_edge(9, 'B') + >>> graph.add_edge(9, 'C') + >>> graph.add_edge('C', 'D') + >>> graph.add_edge('C', 'E') + >>> graph.add_edge('C', 'F') + >>> nx.write_network_text(graph) + ╙── 0 + └── 1 + └── 2 + └── 3 + └── 4 + └── 5 + └── 6 + └── 7 + └── 8 + └── 9 + ├── A + ├── B + └── C + ├── D + ├── E + └── F + >>> nx.write_network_text(graph, vertical_chains=True) + ╙── 0 + │ + 1 + │ + 2 + │ + 3 + │ + 4 + │ + 5 + │ + 6 + │ + 7 + │ + 8 + │ + 9 + ├── A + ├── B + └── C + ├── D + ├── E + └── F + """ + from typing import Any, NamedTuple + + class StackFrame(NamedTuple): + parent: Any + node: Any + indents: list + this_islast: bool + this_vertical: bool + + collapse_attr = "collapse" + + is_directed = graph.is_directed() + + if is_directed: + glyphs = AsciiDirectedGlyphs if ascii_only else UtfDirectedGlyphs + succ = graph.succ + pred = graph.pred + else: + glyphs = AsciiUndirectedGlyphs if ascii_only else UtfUndirectedGlyphs + succ = graph.adj + pred = graph.adj + + if isinstance(with_labels, str): + label_attr = with_labels + elif with_labels: + label_attr = "label" + else: + label_attr = None + + if max_depth == 0: + yield glyphs.empty + " ..." + elif len(graph.nodes) == 0: + yield glyphs.empty + else: + # If the nodes to traverse are unspecified, find the minimal set of + # nodes that will reach the entire graph + if sources is None: + sources = _find_sources(graph) + + # Populate the stack with each: + # 1. parent node in the DFS tree (or None for root nodes), + # 2. the current node in the DFS tree + # 2. a list of indentations indicating depth + # 3. a flag indicating if the node is the final one to be written. + # Reverse the stack so sources are popped in the correct order. + last_idx = len(sources) - 1 + stack = [ + StackFrame(None, node, [], (idx == last_idx), False) + for idx, node in enumerate(sources) + ][::-1] + + num_skipped_children = defaultdict(lambda: 0) + seen_nodes = set() + while stack: + parent, node, indents, this_islast, this_vertical = stack.pop() + + if node is not Ellipsis: + skip = node in seen_nodes + if skip: + # Mark that we skipped a parent's child + num_skipped_children[parent] += 1 + + if this_islast: + # If we reached the last child of a parent, and we skipped + # any of that parents children, then we should emit an + # ellipsis at the end after this. + if num_skipped_children[parent] and parent is not None: + # Append the ellipsis to be emitted last + next_islast = True + try_frame = StackFrame( + node, Ellipsis, indents, next_islast, False + ) + stack.append(try_frame) + + # Redo this frame, but not as a last object + next_islast = False + try_frame = StackFrame( + parent, node, indents, next_islast, this_vertical + ) + stack.append(try_frame) + continue + + if skip: + continue + seen_nodes.add(node) + + if not indents: + # Top level items (i.e. 
trees in the forest) get different + # glyphs to indicate they are not actually connected + if this_islast: + this_vertical = False + this_prefix = indents + [glyphs.newtree_last] + next_prefix = indents + [glyphs.endof_forest] + else: + this_prefix = indents + [glyphs.newtree_mid] + next_prefix = indents + [glyphs.within_forest] + + else: + # Non-top-level items + if this_vertical: + this_prefix = indents + next_prefix = indents + else: + if this_islast: + this_prefix = indents + [glyphs.last] + next_prefix = indents + [glyphs.endof_forest] + else: + this_prefix = indents + [glyphs.mid] + next_prefix = indents + [glyphs.within_tree] + + if node is Ellipsis: + label = " ..." + suffix = "" + children = [] + else: + if label_attr is not None: + label = str(graph.nodes[node].get(label_attr, node)) + else: + label = str(node) + + # Determine if we want to show the children of this node. + if collapse_attr is not None: + collapse = graph.nodes[node].get(collapse_attr, False) + else: + collapse = False + + # Determine: + # (1) children to traverse into after showing this node. + # (2) parents to immediately show to the right of this node. + if is_directed: + # In the directed case we must show every successor node + # note: it may be skipped later, but we don't have that + # information here. + children = list(succ[node]) + # In the directed case we must show every predecessor + # except for parent we directly traversed from. + handled_parents = {parent} + else: + # Showing only the unseen children results in a more + # concise representation for the undirected case. + children = [ + child for child in succ[node] if child not in seen_nodes + ] + + # In the undirected case, parents are also children, so we + # only need to immediately show the ones we can no longer + # traverse + handled_parents = {*children, parent} + + if max_depth is not None and len(indents) == max_depth - 1: + # Use ellipsis to indicate we have reached maximum depth + if children: + children = [Ellipsis] + handled_parents = {parent} + + if collapse: + # Collapsing a node is the same as reaching maximum depth + if children: + children = [Ellipsis] + handled_parents = {parent} + + # The other parents are other predecessors of this node that + # are not handled elsewhere. + other_parents = [p for p in pred[node] if p not in handled_parents] + if other_parents: + if label_attr is not None: + other_parents_labels = ", ".join( + [ + str(graph.nodes[p].get(label_attr, p)) + for p in other_parents + ] + ) + else: + other_parents_labels = ", ".join( + [str(p) for p in other_parents] + ) + suffix = " ".join(["", glyphs.backedge, other_parents_labels]) + else: + suffix = "" + + # Emit the line for this node, this will be called for each node + # exactly once. + if this_vertical: + yield "".join(this_prefix + [glyphs.vertical_edge]) + + yield "".join(this_prefix + [label, suffix]) + + if vertical_chains: + if is_directed: + num_children = len(set(children)) + else: + num_children = len(set(children) - {parent}) + # The next node can be drawn vertically if it is the only + # remaining child of this node. + next_is_vertical = num_children == 1 + else: + next_is_vertical = False + + # Push children on the stack in reverse order so they are popped in + # the original order. 
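# (Worked example of the ordering below: for children [a, b, c] the loop
#  pushes c, b, a, so a is popped and drawn first. Because the list is
#  reversed, idx == 0 corresponds to the original *last* child, which is
#  why next_islast is computed as idx == 0.)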
+ for idx, child in enumerate(children[::-1]): + next_islast = idx == 0 + try_frame = StackFrame( + node, child, next_prefix, next_islast, next_is_vertical + ) + stack.append(try_frame) + + +@open_file(1, "w") +def write_network_text( + graph, + path=None, + with_labels=True, + sources=None, + max_depth=None, + ascii_only=False, + end="\n", + vertical_chains=False, +): + """Creates a nice text representation of a graph + + This works via a depth-first traversal of the graph and writing a line for + each unique node encountered. Non-tree edges are written to the right of + each node, and connection to a non-tree edge is indicated with an ellipsis. + This representation works best when the input graph is a forest, but any + graph can be represented. + + Parameters + ---------- + graph : nx.DiGraph | nx.Graph + Graph to represent + + path : string or file or callable or None + Filename or file handle for data output. + if a function, then it will be called for each generated line. + if None, this will default to "sys.stdout.write" + + with_labels : bool | str + If True will use the "label" attribute of a node to display if it + exists otherwise it will use the node value itself. If given as a + string, then that attribute name will be used instead of "label". + Defaults to True. + + sources : List + Specifies which nodes to start traversal from. Note: nodes that are not + reachable from one of these sources may not be shown. If unspecified, + the minimal set of nodes needed to reach all others will be used. + + max_depth : int | None + The maximum depth to traverse before stopping. Defaults to None. + + ascii_only : Boolean + If True only ASCII characters are used to construct the visualization + + end : string + The line ending character + + vertical_chains : Boolean + If True, chains of nodes will be drawn vertically when possible. + + Examples + -------- + >>> graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph) + >>> nx.write_network_text(graph) + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + └─╼ 6 + + >>> # A near tree with one non-tree edge + >>> graph.add_edge(5, 1) + >>> nx.write_network_text(graph) + ╙── 0 + ├─╼ 1 ╾ 5 + │ ├─╼ 3 + │ └─╼ 4 + └─╼ 2 + ├─╼ 5 + │ └─╼ ... + └─╼ 6 + + >>> graph = nx.cycle_graph(5) + >>> nx.write_network_text(graph) + ╙── 0 + ├── 1 + │ └── 2 + │ └── 3 + │ └── 4 ─ 0 + └── ... + + >>> graph = nx.cycle_graph(5, nx.DiGraph) + >>> nx.write_network_text(graph, vertical_chains=True) + ╙── 0 ╾ 4 + ╽ + 1 + ╽ + 2 + ╽ + 3 + ╽ + 4 + └─╼ ... + + >>> nx.write_network_text(graph, vertical_chains=True, ascii_only=True) + +-- 0 <- 4 + ! + 1 + ! + 2 + ! + 3 + ! + 4 + L-> ... + + >>> graph = nx.generators.barbell_graph(4, 2) + >>> nx.write_network_text(graph, vertical_chains=False) + ╙── 4 + ├── 5 + │ └── 6 + │ ├── 7 + │ │ ├── 8 ─ 6 + │ │ │ └── 9 ─ 6, 7 + │ │ └── ... + │ └── ... + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ └── 2 ─ 0, 3 + │ └── ... + └── ... + >>> nx.write_network_text(graph, vertical_chains=True) + ╙── 4 + ├── 5 + │ │ + │ 6 + │ ├── 7 + │ │ ├── 8 ─ 6 + │ │ │ │ + │ │ │ 9 ─ 6, 7 + │ │ └── ... + │ └── ... + └── 3 + ├── 0 + │ ├── 1 ─ 3 + │ │ │ + │ │ 2 ─ 0, 3 + │ └── ... + └── ... + + >>> graph = nx.complete_graph(5, create_using=nx.Graph) + >>> nx.write_network_text(graph) + ╙── 0 + ├── 1 + │ ├── 2 ─ 0 + │ │ ├── 3 ─ 0, 1 + │ │ │ └── 4 ─ 0, 1, 2 + │ │ └── ... + │ └── ... + └── ... + + >>> graph = nx.complete_graph(3, create_using=nx.DiGraph) + >>> nx.write_network_text(graph) + ╙── 0 ╾ 1, 2 + ├─╼ 1 ╾ 2 + │ ├─╼ 2 ╾ 0 + │ │ └─╼ ... + │ └─╼ ... + └─╼ ... 
+ """ + if path is None: + # The path is unspecified, write to stdout + _write = sys.stdout.write + elif hasattr(path, "write"): + # The path is already an open file + _write = path.write + elif callable(path): + # The path is a custom callable + _write = path + else: + raise TypeError(type(path)) + + for line in generate_network_text( + graph, + with_labels=with_labels, + sources=sources, + max_depth=max_depth, + ascii_only=ascii_only, + vertical_chains=vertical_chains, + ): + _write(line + end) + + +def _find_sources(graph): + """ + Determine a minimal set of nodes such that the entire graph is reachable + """ + # For each connected part of the graph, choose at least + # one node as a starting point, preferably without a parent + if graph.is_directed(): + # Choose one node from each SCC with minimum in_degree + sccs = list(nx.strongly_connected_components(graph)) + # condensing the SCCs forms a dag, the nodes in this graph with + # 0 in-degree correspond to the SCCs from which the minimum set + # of nodes from which all other nodes can be reached. + scc_graph = nx.condensation(graph, sccs) + supernode_to_nodes = {sn: [] for sn in scc_graph.nodes()} + # Note: the order of mapping differs between pypy and cpython + # so we have to loop over graph nodes for consistency + mapping = scc_graph.graph["mapping"] + for n in graph.nodes: + sn = mapping[n] + supernode_to_nodes[sn].append(n) + sources = [] + for sn in scc_graph.nodes(): + if scc_graph.in_degree[sn] == 0: + scc = supernode_to_nodes[sn] + node = min(scc, key=lambda n: graph.in_degree[n]) + sources.append(node) + else: + # For undirected graph, the entire graph will be reachable as + # long as we consider one node from every connected component + sources = [ + min(cc, key=lambda n: graph.degree[n]) + for cc in nx.connected_components(graph) + ] + sources = sorted(sources, key=lambda n: graph.degree[n]) + return sources + + +def forest_str(graph, with_labels=True, sources=None, write=None, ascii_only=False): + """Creates a nice utf8 representation of a forest + + This function has been superseded by + :func:`nx.readwrite.text.generate_network_text`, which should be used + instead. + + Parameters + ---------- + graph : nx.DiGraph | nx.Graph + Graph to represent (must be a tree, forest, or the empty graph) + + with_labels : bool + If True will use the "label" attribute of a node to display if it + exists otherwise it will use the node value itself. Defaults to True. + + sources : List + Mainly relevant for undirected forests, specifies which nodes to list + first. If unspecified the root nodes of each tree will be used for + directed forests; for undirected forests this defaults to the nodes + with the smallest degree. + + write : callable + Function to use to write to, if None new lines are appended to + a list and returned. If set to the `print` function, lines will + be written to stdout as they are generated. If specified, + this function will return None. Defaults to None. 
+ + ascii_only : Boolean + If True only ASCII characters are used to construct the visualization + + Returns + ------- + str | None : + utf8 representation of the tree / forest + + Examples + -------- + >>> graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph) + >>> print(nx.forest_str(graph)) + ╙── 0 + ├─╼ 1 + │ ├─╼ 3 + │ │ ├─╼ 7 + │ │ └─╼ 8 + │ └─╼ 4 + │ ├─╼ 9 + │ └─╼ 10 + └─╼ 2 + ├─╼ 5 + │ ├─╼ 11 + │ └─╼ 12 + └─╼ 6 + ├─╼ 13 + └─╼ 14 + + + >>> graph = nx.balanced_tree(r=1, h=2, create_using=nx.Graph) + >>> print(nx.forest_str(graph)) + ╙── 0 + └── 1 + └── 2 + + >>> print(nx.forest_str(graph, ascii_only=True)) + +-- 0 + L-- 1 + L-- 2 + """ + msg = ( + "\nforest_str is deprecated as of version 3.1 and will be removed " + "in version 3.3. Use generate_network_text or write_network_text " + "instead.\n" + ) + warnings.warn(msg, DeprecationWarning) + + if len(graph.nodes) > 0: + if not nx.is_forest(graph): + raise nx.NetworkXNotImplemented("input must be a forest or the empty graph") + + printbuf = [] + if write is None: + _write = printbuf.append + else: + _write = write + + write_network_text( + graph, + _write, + with_labels=with_labels, + sources=sources, + ascii_only=ascii_only, + end="", + ) + + if write is None: + # Only return a string if the custom write function was not specified + return "\n".join(printbuf) + + +def _parse_network_text(lines): + """Reconstructs a graph from a network text representation. + + This is mainly used for testing. Network text is for display, not + serialization, as such this cannot parse all network text representations + because node labels can be ambiguous with the glyphs and indentation used + to represent edge structure. Additionally, there is no way to determine if + disconnected graphs were originally directed or undirected. + + Parameters + ---------- + lines : list or iterator of strings + Input data in network text format + + Returns + ------- + G: NetworkX graph + The graph corresponding to the lines in network text format. + """ + from itertools import chain + from typing import Any, NamedTuple, Union + + class ParseStackFrame(NamedTuple): + node: Any + indent: int + has_vertical_child: Union[int, None] + + initial_line_iter = iter(lines) + + is_ascii = None + is_directed = None + + ############## + # Initial Pass + ############## + + # Do an initial pass over the lines to determine what type of graph it is. + # Remember what these lines were, so we can reiterate over them in the + # parsing pass. + initial_lines = [] + try: + first_line = next(initial_line_iter) + except StopIteration: + ... + else: + initial_lines.append(first_line) + # The first character indicates if it is an ASCII or UTF graph + first_char = first_line[0] + if first_char in { + UtfBaseGlyphs.empty, + UtfBaseGlyphs.newtree_mid[0], + UtfBaseGlyphs.newtree_last[0], + }: + is_ascii = False + elif first_char in { + AsciiBaseGlyphs.empty, + AsciiBaseGlyphs.newtree_mid[0], + AsciiBaseGlyphs.newtree_last[0], + }: + is_ascii = True + else: + raise AssertionError(f"Unexpected first character: {first_char}") + + if is_ascii: + directed_glyphs = AsciiDirectedGlyphs.as_dict() + undirected_glyphs = AsciiUndirectedGlyphs.as_dict() + else: + directed_glyphs = UtfDirectedGlyphs.as_dict() + undirected_glyphs = UtfUndirectedGlyphs.as_dict() + + # For both directed / undirected glyphs, determine which glyphs never + # appear as substrings in the other undirected / directed glyphs. Glyphs + # with this property unambiguously indicates if a graph is directed / + # undirected. 
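# (Concrete case under the UTF glyph sets defined above: "└─╼ " appears in
#  no undirected glyph, so it unambiguously marks a directed graph, and
#  "└── " likewise unambiguously marks an undirected one; the bare
#  undirected backedge "─" remains ambiguous because it also occurs inside
#  the directed glyph "└─╼ ".)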
+ directed_items = set(directed_glyphs.values()) + undirected_items = set(undirected_glyphs.values()) + unambiguous_directed_items = [] + for item in directed_items: + other_items = undirected_items + other_supersets = [other for other in other_items if item in other] + if not other_supersets: + unambiguous_directed_items.append(item) + unambiguous_undirected_items = [] + for item in undirected_items: + other_items = directed_items + other_supersets = [other for other in other_items if item in other] + if not other_supersets: + unambiguous_undirected_items.append(item) + + for line in initial_line_iter: + initial_lines.append(line) + if any(item in line for item in unambiguous_undirected_items): + is_directed = False + break + elif any(item in line for item in unambiguous_directed_items): + is_directed = True + break + + if is_directed is None: + # Not enough information to determine, choose undirected by default + is_directed = False + + glyphs = directed_glyphs if is_directed else undirected_glyphs + + # the backedge symbol by itself can be ambiguous, but with spaces around it + # becomes unambiguous. + backedge_symbol = " " + glyphs["backedge"] + " " + + # Reconstruct an iterator over all of the lines. + parsing_line_iter = chain(initial_lines, initial_line_iter) + + ############## + # Parsing Pass + ############## + + edges = [] + nodes = [] + is_empty = None + + noparent = object() # sentinel value + + # keep a stack of previous nodes that could be parents of subsequent nodes + stack = [ParseStackFrame(noparent, -1, None)] + + for line in parsing_line_iter: + if line == glyphs["empty"]: + # If the line is the empty glyph, we are done. + # There shouldn't be anything else after this. + is_empty = True + continue + + if backedge_symbol in line: + # This line has one or more backedges, separate those out + node_part, backedge_part = line.split(backedge_symbol) + backedge_nodes = [u.strip() for u in backedge_part.split(", ")] + # Now the node can be parsed + node_part = node_part.rstrip() + prefix, node = node_part.rsplit(" ", 1) + node = node.strip() + # Add the backedges to the edge list + edges.extend([(u, node) for u in backedge_nodes]) + else: + # No backedge, the tail of this line is the node + prefix, node = line.rsplit(" ", 1) + node = node.strip() + + prev = stack.pop() + + if node in glyphs["vertical_edge"]: + # Previous node is still the previous node, but we know it will + # have exactly one child, which will need to have its nesting level + # adjusted. + modified_prev = ParseStackFrame( + prev.node, + prev.indent, + True, + ) + stack.append(modified_prev) + continue + + # The length of the string before the node characters give us a hint + # about our nesting level. The only case where this doesn't work is + # when there are vertical chains, which is handled explicitly. + indent = len(prefix) + curr = ParseStackFrame(node, indent, None) + + if prev.has_vertical_child: + # In this case we know prev must be the parent of our current line, + # so we don't have to search the stack. (which is good because the + # indentation check wouldn't work in this case). + ... + else: + # If the previous node nesting-level is greater than the current + # nodes nesting-level than the previous node was the end of a path, + # and is not our parent. We can safely pop nodes off the stack + # until we find one with a comparable nesting-level, which is our + # parent. 
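# (Worked example, assuming the UTF undirected glyphs, for the text:
#      ╙── 0
#          ├── 1
#          │   └── 2
#          └── 3
#  the prefixes left of each node have lengths 3, 7, 11 and 7. When "3" is
#  reached, prev is "2" at indent 11; the loop below pops "2", then "1"
#  (7 <= 7), and stops at "0" (3 < 7), so "3" is attached to "0".)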
+ while curr.indent <= prev.indent: + prev = stack.pop() + + if node == "...": + # The current previous node is no longer a valid parent, + # keep it popped from the stack. + stack.append(prev) + else: + # The previous and current nodes may still be parents, so add them + # back onto the stack. + stack.append(prev) + stack.append(curr) + + # Add the node and the edge to its parent to the node / edge lists. + nodes.append(curr.node) + if prev.node is not noparent: + edges.append((prev.node, curr.node)) + + if is_empty: + # Sanity check + assert len(nodes) == 0 + + # Reconstruct the graph + cls = nx.DiGraph if is_directed else nx.Graph + new = cls() + new.add_nodes_from(nodes) + new.add_edges_from(edges) + return new diff --git a/phivenv/Lib/site-packages/networkx/tests/__init__.py b/phivenv/Lib/site-packages/networkx/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36130a498321e3c604fffadfb9cbbef33e6d3ed4 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca08c64a68cfac3760e33a5c741d62f88dba0fcf Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_all_random_functions.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63ceea1be30cf11a4dc04aa920a1e4a46edbae17 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..743751bbe3dba14c9375a3dd98f96ee9b22f11c9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_numpy.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2afae5724e93e381f053e08e5bea32e7c660b10e Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_pandas.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81317f966e82e98971eddc06751f7bcc9c98a223 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_convert_scipy.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2deacb0ff7197eb8ddbada4cedb5defb4afc7745 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_exceptions.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_import.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_import.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56820e31cea85298d73c4745de90ddd6be2c17b9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_import.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71547ec36da0b8f1c49ba7f000bcc078d7708529 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_lazy_imports.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_relabel.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_relabel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c34fa59b9b89474272014615a8347c50b4d4784c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/tests/__pycache__/test_relabel.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/tests/test_all_random_functions.py b/phivenv/Lib/site-packages/networkx/tests/test_all_random_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..828c3137aa1c9183d45279769809eff8c0642b70 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_all_random_functions.py @@ -0,0 +1,247 @@ +import pytest + +np = pytest.importorskip("numpy") +import random + +import networkx as nx +from networkx.algorithms import approximation as approx +from networkx.algorithms import threshold + +progress = 0 + +# store the random numbers after setting a global seed +np.random.seed(42) +np_rv = np.random.rand() +random.seed(42) +py_rv = random.random() + + +def t(f, *args, **kwds): + """call one function and check if global RNG changed""" + global progress + progress += 1 + print(progress, ",", end="") + + f(*args, **kwds) + + after_np_rv = np.random.rand() + # if np_rv != after_np_rv: + # print(np_rv, after_np_rv, "don't match np!") + assert np_rv == after_np_rv + np.random.seed(42) + + after_py_rv = random.random() + # if py_rv != after_py_rv: + # print(py_rv, after_py_rv, "don't match py!") + assert py_rv == after_py_rv + random.seed(42) + + +def run_all_random_functions(seed): + n = 20 + m = 10 + k = l = 2 + s = v = 10 + p = q = p1 = p2 = p_in = p_out = 0.4 + alpha = radius = theta = 0.75 + sizes = (20, 20, 10) + colors = [1, 2, 3] + G = nx.barbell_graph(12, 20) + H = nx.cycle_graph(3) + H.add_weighted_edges_from((u, v, 0.2) for u, v in H.edges) + deg_sequence = [3, 2, 1, 3, 2, 1, 3, 2, 1, 2, 1, 2, 1] + in_degree_sequence = w = sequence = aseq = bseq = deg_sequence + + # print("starting...") + t(nx.maximal_independent_set, G, seed=seed) + t(nx.rich_club_coefficient, G, seed=seed, normalized=False) + t(nx.random_reference, G, seed=seed) + t(nx.lattice_reference, G, seed=seed) + t(nx.sigma, G, 1, 2, seed=seed) + t(nx.omega, G, 
1, 2, seed=seed) + # print("out of smallworld.py") + t(nx.double_edge_swap, G, seed=seed) + # print("starting connected_double_edge_swap") + t(nx.connected_double_edge_swap, nx.complete_graph(9), seed=seed) + # print("ending connected_double_edge_swap") + t(nx.random_layout, G, seed=seed) + t(nx.fruchterman_reingold_layout, G, seed=seed) + t(nx.algebraic_connectivity, G, seed=seed) + t(nx.fiedler_vector, G, seed=seed) + t(nx.spectral_ordering, G, seed=seed) + # print('starting average_clustering') + t(approx.average_clustering, G, seed=seed) + t(approx.simulated_annealing_tsp, H, "greedy", source=1, seed=seed) + t(approx.threshold_accepting_tsp, H, "greedy", source=1, seed=seed) + t( + approx.traveling_salesman_problem, + H, + method=lambda G, wt: approx.simulated_annealing_tsp(G, "greedy", wt, seed=seed), + ) + t( + approx.traveling_salesman_problem, + H, + method=lambda G, wt: approx.threshold_accepting_tsp(G, "greedy", wt, seed=seed), + ) + t(nx.betweenness_centrality, G, seed=seed) + t(nx.edge_betweenness_centrality, G, seed=seed) + t(nx.approximate_current_flow_betweenness_centrality, G, seed=seed) + # print("kernighan") + t(nx.algorithms.community.kernighan_lin_bisection, G, seed=seed) + # nx.algorithms.community.asyn_lpa_communities(G, seed=seed) + t(nx.algorithms.tree.greedy_branching, G, seed=seed) + t(nx.algorithms.tree.Edmonds, G, seed=seed) + # print('done with graph argument functions') + + t(nx.spectral_graph_forge, G, alpha, seed=seed) + t(nx.algorithms.community.asyn_fluidc, G, k, max_iter=1, seed=seed) + t( + nx.algorithms.connectivity.edge_augmentation.greedy_k_edge_augmentation, + G, + k, + seed=seed, + ) + t(nx.algorithms.coloring.strategy_random_sequential, G, colors, seed=seed) + + cs = ["d", "i", "i", "d", "d", "i"] + t(threshold.swap_d, cs, seed=seed) + t(nx.configuration_model, deg_sequence, seed=seed) + t( + nx.directed_configuration_model, + in_degree_sequence, + in_degree_sequence, + seed=seed, + ) + t(nx.expected_degree_graph, w, seed=seed) + t(nx.random_degree_sequence_graph, sequence, seed=seed) + joint_degrees = { + 1: {4: 1}, + 2: {2: 2, 3: 2, 4: 2}, + 3: {2: 2, 4: 1}, + 4: {1: 1, 2: 2, 3: 1}, + } + t(nx.joint_degree_graph, joint_degrees, seed=seed) + joint_degree_sequence = [ + (1, 0), + (1, 0), + (1, 0), + (2, 0), + (1, 0), + (2, 1), + (0, 1), + (0, 1), + ] + t(nx.random_clustered_graph, joint_degree_sequence, seed=seed) + constructor = [(3, 3, 0.5), (10, 10, 0.7)] + t(nx.random_shell_graph, constructor, seed=seed) + t(nx.random_triad, G.to_directed(), seed=seed) + mapping = {1: 0.4, 2: 0.3, 3: 0.3} + t(nx.utils.random_weighted_sample, mapping, k, seed=seed) + t(nx.utils.weighted_choice, mapping, seed=seed) + t(nx.algorithms.bipartite.configuration_model, aseq, bseq, seed=seed) + t(nx.algorithms.bipartite.preferential_attachment_graph, aseq, p, seed=seed) + + def kernel_integral(u, w, z): + return z - w + + t(nx.random_kernel_graph, n, kernel_integral, seed=seed) + + sizes = [75, 75, 300] + probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]] + t(nx.stochastic_block_model, sizes, probs, seed=seed) + t(nx.random_partition_graph, sizes, p_in, p_out, seed=seed) + + # print("starting generator functions") + t(threshold.random_threshold_sequence, n, p, seed=seed) + t(nx.tournament.random_tournament, n, seed=seed) + t(nx.relaxed_caveman_graph, l, k, p, seed=seed) + t(nx.planted_partition_graph, l, k, p_in, p_out, seed=seed) + t(nx.gaussian_random_partition_graph, n, s, v, p_in, p_out, seed=seed) + t(nx.gn_graph, n, seed=seed) + t(nx.gnr_graph, 
n, p, seed=seed) + t(nx.gnc_graph, n, seed=seed) + t(nx.scale_free_graph, n, seed=seed) + t(nx.directed.random_uniform_k_out_graph, n, k, seed=seed) + t(nx.random_k_out_graph, n, k, alpha, seed=seed) + N = 1000 + t(nx.partial_duplication_graph, N, n, p, q, seed=seed) + t(nx.duplication_divergence_graph, n, p, seed=seed) + t(nx.random_geometric_graph, n, radius, seed=seed) + t(nx.soft_random_geometric_graph, n, radius, seed=seed) + t(nx.geographical_threshold_graph, n, theta, seed=seed) + t(nx.waxman_graph, n, seed=seed) + t(nx.navigable_small_world_graph, n, seed=seed) + t(nx.thresholded_random_geometric_graph, n, radius, theta, seed=seed) + t(nx.uniform_random_intersection_graph, n, m, p, seed=seed) + t(nx.k_random_intersection_graph, n, m, k, seed=seed) + + t(nx.general_random_intersection_graph, n, 2, [0.1, 0.5], seed=seed) + t(nx.fast_gnp_random_graph, n, p, seed=seed) + t(nx.gnp_random_graph, n, p, seed=seed) + t(nx.dense_gnm_random_graph, n, m, seed=seed) + t(nx.gnm_random_graph, n, m, seed=seed) + t(nx.newman_watts_strogatz_graph, n, k, p, seed=seed) + t(nx.watts_strogatz_graph, n, k, p, seed=seed) + t(nx.connected_watts_strogatz_graph, n, k, p, seed=seed) + t(nx.random_regular_graph, 3, n, seed=seed) + t(nx.barabasi_albert_graph, n, m, seed=seed) + t(nx.extended_barabasi_albert_graph, n, m, p, q, seed=seed) + t(nx.powerlaw_cluster_graph, n, m, p, seed=seed) + t(nx.random_lobster, n, p1, p2, seed=seed) + t(nx.random_powerlaw_tree, n, seed=seed, tries=5000) + t(nx.random_powerlaw_tree_sequence, 10, seed=seed, tries=5000) + t(nx.random_tree, n, seed=seed) + t(nx.utils.powerlaw_sequence, n, seed=seed) + t(nx.utils.zipf_rv, 2.3, seed=seed) + cdist = [0.2, 0.4, 0.5, 0.7, 0.9, 1.0] + t(nx.utils.discrete_sequence, n, cdistribution=cdist, seed=seed) + t(nx.algorithms.bipartite.random_graph, n, m, p, seed=seed) + t(nx.algorithms.bipartite.gnmk_random_graph, n, m, k, seed=seed) + LFR = nx.generators.LFR_benchmark_graph + t( + LFR, + 25, + 3, + 1.5, + 0.1, + average_degree=3, + min_community=10, + seed=seed, + max_community=20, + ) + t(nx.random_internet_as_graph, n, seed=seed) + # print("done") + + +# choose to test an integer seed, or whether a single RNG can be everywhere +# np_rng = np.random.RandomState(14) +# seed = np_rng +# seed = 14 + + +@pytest.mark.slow +# print("NetworkX Version:", nx.__version__) +def test_rng_interface(): + global progress + + # try different kinds of seeds + for seed in [14, np.random.RandomState(14)]: + np.random.seed(42) + random.seed(42) + run_all_random_functions(seed) + progress = 0 + + # check that both global RNGs are unaffected + after_np_rv = np.random.rand() + # if np_rv != after_np_rv: + # print(np_rv, after_np_rv, "don't match np!") + assert np_rv == after_np_rv + after_py_rv = random.random() + # if py_rv != after_py_rv: + # print(py_rv, after_py_rv, "don't match py!") + assert py_rv == after_py_rv + + +# print("\nDone testing seed:", seed) + +# test_rng_interface() diff --git a/phivenv/Lib/site-packages/networkx/tests/test_convert.py b/phivenv/Lib/site-packages/networkx/tests/test_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..44bed9438945a39bb5eb85477301f58cfcd70cf0 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_convert.py @@ -0,0 +1,321 @@ +import pytest + +import networkx as nx +from networkx.convert import ( + from_dict_of_dicts, + from_dict_of_lists, + to_dict_of_dicts, + to_dict_of_lists, + to_networkx_graph, +) +from networkx.generators.classic import barbell_graph, cycle_graph +from 
networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestConvert: + def edgelists_equal(self, e1, e2): + return sorted(sorted(e) for e in e1) == sorted(sorted(e) for e in e2) + + def test_simple_graphs(self): + for dest, source in [ + (to_dict_of_dicts, from_dict_of_dicts), + (to_dict_of_lists, from_dict_of_lists), + ]: + G = barbell_graph(10, 3) + G.graph = {} + dod = dest(G) + + # Dict of [dicts, lists] + GG = source(dod) + assert graphs_equal(G, GG) + GW = to_networkx_graph(dod) + assert graphs_equal(G, GW) + GI = nx.Graph(dod) + assert graphs_equal(G, GI) + + # With nodelist keyword + P4 = nx.path_graph(4) + P3 = nx.path_graph(3) + P4.graph = {} + P3.graph = {} + dod = dest(P4, nodelist=[0, 1, 2]) + Gdod = nx.Graph(dod) + assert graphs_equal(Gdod, P3) + + def test_exceptions(self): + # NX graph + class G: + adj = None + + pytest.raises(nx.NetworkXError, to_networkx_graph, G) + + # pygraphviz agraph + class G: + is_strict = None + + pytest.raises(nx.NetworkXError, to_networkx_graph, G) + + # Dict of [dicts, lists] + G = {"a": 0} + pytest.raises(TypeError, to_networkx_graph, G) + + # list or generator of edges + class G: + next = None + + pytest.raises(nx.NetworkXError, to_networkx_graph, G) + + # no match + pytest.raises(nx.NetworkXError, to_networkx_graph, "a") + + def test_digraphs(self): + for dest, source in [ + (to_dict_of_dicts, from_dict_of_dicts), + (to_dict_of_lists, from_dict_of_lists), + ]: + G = cycle_graph(10) + + # Dict of [dicts, lists] + dod = dest(G) + GG = source(dod) + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dod) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GW.edges())) + GI = nx.Graph(dod) + assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GI.edges())) + + G = cycle_graph(10, create_using=nx.DiGraph) + dod = dest(G) + GG = source(dod, create_using=nx.DiGraph) + assert sorted(G.nodes()) == sorted(GG.nodes()) + assert sorted(G.edges()) == sorted(GG.edges()) + GW = to_networkx_graph(dod, create_using=nx.DiGraph) + assert sorted(G.nodes()) == sorted(GW.nodes()) + assert sorted(G.edges()) == sorted(GW.edges()) + GI = nx.DiGraph(dod) + assert sorted(G.nodes()) == sorted(GI.nodes()) + assert sorted(G.edges()) == sorted(GI.edges()) + + def test_graph(self): + g = nx.cycle_graph(10) + G = nx.Graph() + G.add_nodes_from(g) + G.add_weighted_edges_from((u, v, u) for u, v in g.edges()) + + # Dict of dicts + dod = to_dict_of_dicts(G) + GG = from_dict_of_dicts(dod, create_using=nx.Graph) + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dod, create_using=nx.Graph) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GW.edges())) + GI = nx.Graph(dod) + assert sorted(G.nodes()) == sorted(GI.nodes()) + assert sorted(G.edges()) == sorted(GI.edges()) + + # Dict of lists + dol = to_dict_of_lists(G) + GG = from_dict_of_lists(dol, create_using=nx.Graph) + # dict of lists throws away edge data so set it to none + enone = [(u, v, {}) for (u, v, d) in G.edges(data=True)] + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(enone, sorted(GG.edges(data=True))) + GW = to_networkx_graph(dol, create_using=nx.Graph) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert 
edges_equal(enone, sorted(GW.edges(data=True))) + GI = nx.Graph(dol) + assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) + assert edges_equal(enone, sorted(GI.edges(data=True))) + + def test_with_multiedges_self_loops(self): + G = cycle_graph(10) + XG = nx.Graph() + XG.add_nodes_from(G) + XG.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGM = nx.MultiGraph() + XGM.add_nodes_from(G) + XGM.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGM.add_edge(0, 1, weight=2) # multiedge + XGS = nx.Graph() + XGS.add_nodes_from(G) + XGS.add_weighted_edges_from((u, v, u) for u, v in G.edges()) + XGS.add_edge(0, 0, weight=100) # self loop + + # Dict of dicts + # with self loops, OK + dod = to_dict_of_dicts(XGS) + GG = from_dict_of_dicts(dod, create_using=nx.Graph) + assert nodes_equal(XGS.nodes(), GG.nodes()) + assert edges_equal(XGS.edges(), GG.edges()) + GW = to_networkx_graph(dod, create_using=nx.Graph) + assert nodes_equal(XGS.nodes(), GW.nodes()) + assert edges_equal(XGS.edges(), GW.edges()) + GI = nx.Graph(dod) + assert nodes_equal(XGS.nodes(), GI.nodes()) + assert edges_equal(XGS.edges(), GI.edges()) + + # Dict of lists + # with self loops, OK + dol = to_dict_of_lists(XGS) + GG = from_dict_of_lists(dol, create_using=nx.Graph) + # dict of lists throws away edge data so set it to none + enone = [(u, v, {}) for (u, v, d) in XGS.edges(data=True)] + assert nodes_equal(sorted(XGS.nodes()), sorted(GG.nodes())) + assert edges_equal(enone, sorted(GG.edges(data=True))) + GW = to_networkx_graph(dol, create_using=nx.Graph) + assert nodes_equal(sorted(XGS.nodes()), sorted(GW.nodes())) + assert edges_equal(enone, sorted(GW.edges(data=True))) + GI = nx.Graph(dol) + assert nodes_equal(sorted(XGS.nodes()), sorted(GI.nodes())) + assert edges_equal(enone, sorted(GI.edges(data=True))) + + # Dict of dicts + # with multiedges, OK + dod = to_dict_of_dicts(XGM) + GG = from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=True) + assert nodes_equal(sorted(XGM.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(XGM.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dod, create_using=nx.MultiGraph, multigraph_input=True) + assert nodes_equal(sorted(XGM.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(XGM.edges()), sorted(GW.edges())) + GI = nx.MultiGraph(dod) + assert nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes())) + assert sorted(XGM.edges()) == sorted(GI.edges()) + GE = from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=False) + assert nodes_equal(sorted(XGM.nodes()), sorted(GE.nodes())) + assert sorted(XGM.edges()) != sorted(GE.edges()) + GI = nx.MultiGraph(XGM) + assert nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes())) + assert edges_equal(sorted(XGM.edges()), sorted(GI.edges())) + GM = nx.MultiGraph(G) + assert nodes_equal(sorted(GM.nodes()), sorted(G.nodes())) + assert edges_equal(sorted(GM.edges()), sorted(G.edges())) + + # Dict of lists + # with multiedges, OK, but better write as DiGraph else you'll + # get double edges + dol = to_dict_of_lists(G) + GG = from_dict_of_lists(dol, create_using=nx.MultiGraph) + assert nodes_equal(sorted(G.nodes()), sorted(GG.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GG.edges())) + GW = to_networkx_graph(dol, create_using=nx.MultiGraph) + assert nodes_equal(sorted(G.nodes()), sorted(GW.nodes())) + assert edges_equal(sorted(G.edges()), sorted(GW.edges())) + GI = nx.MultiGraph(dol) + assert nodes_equal(sorted(G.nodes()), sorted(GI.nodes())) + assert edges_equal(sorted(G.edges()), 
sorted(GI.edges())) + + def test_edgelists(self): + P = nx.path_graph(4) + e = [(0, 1), (1, 2), (2, 3)] + G = nx.Graph(e) + assert nodes_equal(sorted(G.nodes()), sorted(P.nodes())) + assert edges_equal(sorted(G.edges()), sorted(P.edges())) + assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) + + e = [(0, 1, {}), (1, 2, {}), (2, 3, {})] + G = nx.Graph(e) + assert nodes_equal(sorted(G.nodes()), sorted(P.nodes())) + assert edges_equal(sorted(G.edges()), sorted(P.edges())) + assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) + + e = ((n, n + 1) for n in range(3)) + G = nx.Graph(e) + assert nodes_equal(sorted(G.nodes()), sorted(P.nodes())) + assert edges_equal(sorted(G.edges()), sorted(P.edges())) + assert edges_equal(sorted(G.edges(data=True)), sorted(P.edges(data=True))) + + def test_directed_to_undirected(self): + edges1 = [(0, 1), (1, 2), (2, 0)] + edges2 = [(0, 1), (1, 2), (0, 2)] + assert self.edgelists_equal(nx.Graph(nx.DiGraph(edges1)).edges(), edges1) + assert self.edgelists_equal(nx.Graph(nx.DiGraph(edges2)).edges(), edges1) + assert self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges1)).edges(), edges1) + assert self.edgelists_equal(nx.MultiGraph(nx.DiGraph(edges2)).edges(), edges1) + + assert self.edgelists_equal( + nx.MultiGraph(nx.MultiDiGraph(edges1)).edges(), edges1 + ) + assert self.edgelists_equal( + nx.MultiGraph(nx.MultiDiGraph(edges2)).edges(), edges1 + ) + + assert self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges1)).edges(), edges1) + assert self.edgelists_equal(nx.Graph(nx.MultiDiGraph(edges2)).edges(), edges1) + + def test_attribute_dict_integrity(self): + # we must not replace dict-like graph data structures with dicts + G = nx.Graph() + G.add_nodes_from("abc") + H = to_networkx_graph(G, create_using=nx.Graph) + assert list(H.nodes) == list(G.nodes) + H = nx.DiGraph(G) + assert list(H.nodes) == list(G.nodes) + + def test_to_edgelist(self): + G = nx.Graph([(1, 1)]) + elist = nx.to_edgelist(G, nodelist=list(G)) + assert edges_equal(G.edges(data=True), elist) + + def test_custom_node_attr_dict_safekeeping(self): + class custom_dict(dict): + pass + + class Custom(nx.Graph): + node_attr_dict_factory = custom_dict + + g = nx.Graph() + g.add_node(1, weight=1) + + h = Custom(g) + assert isinstance(g._node[1], dict) + assert isinstance(h._node[1], custom_dict) + + # this raise exception + # h._node.update((n, dd.copy()) for n, dd in g.nodes.items()) + # assert isinstance(h._node[1], custom_dict) + + +@pytest.mark.parametrize( + "edgelist", + ( + # Graph with no edge data + [(0, 1), (1, 2)], + # Graph with edge data + [(0, 1, {"weight": 1.0}), (1, 2, {"weight": 2.0})], + ), +) +def test_to_dict_of_dicts_with_edgedata_param(edgelist): + G = nx.Graph() + G.add_edges_from(edgelist) + # Innermost dict value == edge_data when edge_data != None. 
+ # In the case when G has edge data, it is overwritten + expected = {0: {1: 10}, 1: {0: 10, 2: 10}, 2: {1: 10}} + assert nx.to_dict_of_dicts(G, edge_data=10) == expected + + +def test_to_dict_of_dicts_with_edgedata_and_nodelist(): + G = nx.path_graph(5) + nodelist = [2, 3, 4] + expected = {2: {3: 10}, 3: {2: 10, 4: 10}, 4: {3: 10}} + assert nx.to_dict_of_dicts(G, nodelist=nodelist, edge_data=10) == expected + + +def test_to_dict_of_dicts_with_edgedata_multigraph(): + """Multi edge data overwritten when edge_data != None""" + G = nx.MultiGraph() + G.add_edge(0, 1, key="a") + G.add_edge(0, 1, key="b") + # Multi edge data lost when edge_data is not None + expected = {0: {1: 10}, 1: {0: 10}} + assert nx.to_dict_of_dicts(G, edge_data=10) == expected + + +def test_to_networkx_graph_non_edgelist(): + invalid_edgelist = [1, 2, 3] + with pytest.raises(nx.NetworkXError, match="Input is not a valid edge list"): + nx.to_networkx_graph(invalid_edgelist) diff --git a/phivenv/Lib/site-packages/networkx/tests/test_convert_numpy.py b/phivenv/Lib/site-packages/networkx/tests/test_convert_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..ab73172a4b55eb91e60747f5a8957d7a600eb85a --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_convert_numpy.py @@ -0,0 +1,395 @@ +import pytest + +np = pytest.importorskip("numpy") +npt = pytest.importorskip("numpy.testing") + +import networkx as nx +from networkx.generators.classic import barbell_graph, cycle_graph, path_graph +from networkx.utils import graphs_equal + + +class TestConvertNumpyArray: + def setup_method(self): + self.G1 = barbell_graph(10, 3) + self.G2 = cycle_graph(10, create_using=nx.DiGraph) + self.G3 = self.create_weighted(nx.Graph()) + self.G4 = self.create_weighted(nx.DiGraph()) + + def create_weighted(self, G): + g = cycle_graph(4) + G.add_nodes_from(g) + G.add_weighted_edges_from((u, v, 10 + u) for u, v in g.edges()) + return G + + def assert_equal(self, G1, G2): + assert sorted(G1.nodes()) == sorted(G2.nodes()) + assert sorted(G1.edges()) == sorted(G2.edges()) + + def identity_conversion(self, G, A, create_using): + assert A.sum() > 0 + GG = nx.from_numpy_array(A, create_using=create_using) + self.assert_equal(G, GG) + GW = nx.to_networkx_graph(A, create_using=create_using) + self.assert_equal(G, GW) + GI = nx.empty_graph(0, create_using).__class__(A) + self.assert_equal(G, GI) + + def test_shape(self): + "Conversion from non-square array." + A = np.array([[1, 2, 3], [4, 5, 6]]) + pytest.raises(nx.NetworkXError, nx.from_numpy_array, A) + + def test_identity_graph_array(self): + "Conversion from graph to array to graph." 
+ A = nx.to_numpy_array(self.G1) + self.identity_conversion(self.G1, A, nx.Graph()) + + def test_identity_digraph_array(self): + """Conversion from digraph to array to digraph.""" + A = nx.to_numpy_array(self.G2) + self.identity_conversion(self.G2, A, nx.DiGraph()) + + def test_identity_weighted_graph_array(self): + """Conversion from weighted graph to array to weighted graph.""" + A = nx.to_numpy_array(self.G3) + self.identity_conversion(self.G3, A, nx.Graph()) + + def test_identity_weighted_digraph_array(self): + """Conversion from weighted digraph to array to weighted digraph.""" + A = nx.to_numpy_array(self.G4) + self.identity_conversion(self.G4, A, nx.DiGraph()) + + def test_nodelist(self): + """Conversion from graph to array to graph with nodelist.""" + P4 = path_graph(4) + P3 = path_graph(3) + nodelist = list(P3) + A = nx.to_numpy_array(P4, nodelist=nodelist) + GA = nx.Graph(A) + self.assert_equal(GA, P3) + + # Make nodelist ambiguous by containing duplicates. + nodelist += [nodelist[0]] + pytest.raises(nx.NetworkXError, nx.to_numpy_array, P3, nodelist=nodelist) + + # Make nodelist invalid by including nonexistent nodes + nodelist = [-1, 0, 1] + with pytest.raises( + nx.NetworkXError, + match=f"Nodes {nodelist - P3.nodes} in nodelist is not in G", + ): + nx.to_numpy_array(P3, nodelist=nodelist) + + def test_weight_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = path_graph(4) + A = nx.to_numpy_array(P4) + np.testing.assert_equal(A, nx.to_numpy_array(WP4, weight=None)) + np.testing.assert_equal(0.5 * A, nx.to_numpy_array(WP4)) + np.testing.assert_equal(0.3 * A, nx.to_numpy_array(WP4, weight="other")) + + def test_from_numpy_array_type(self): + A = np.array([[1]]) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == int + + A = np.array([[1]]).astype(float) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == float + + A = np.array([[1]]).astype(str) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == str + + A = np.array([[1]]).astype(bool) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == bool + + A = np.array([[1]]).astype(complex) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == complex + + A = np.array([[1]]).astype(object) + pytest.raises(TypeError, nx.from_numpy_array, A) + + A = np.array([[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]]) + with pytest.raises( + nx.NetworkXError, match=f"Input array must be 2D, not {A.ndim}" + ): + g = nx.from_numpy_array(A) + + def test_from_numpy_array_dtype(self): + dt = [("weight", float), ("cost", int)] + A = np.array([[(1.0, 2)]], dtype=dt) + G = nx.from_numpy_array(A) + assert type(G[0][0]["weight"]) == float + assert type(G[0][0]["cost"]) == int + assert G[0][0]["cost"] == 2 + assert G[0][0]["weight"] == 1.0 + + def test_from_numpy_array_parallel_edges(self): + """Tests that the :func:`networkx.from_numpy_array` function + interprets integer weights as the number of parallel edges when + creating a multigraph. + + """ + A = np.array([[1, 1], [1, 2]]) + # First, with a simple graph, each integer entry in the adjacency + # matrix is interpreted as the weight of a single edge in the graph. 
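# (For A = [[1, 1], [1, 2]] above, this means the entry A[1, 1] == 2
#  becomes a single self-loop on node 1 with weight 2; a plain DiGraph
#  cannot hold parallel edges, so parallel_edges has no effect in this
#  first case.)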
+ expected = nx.DiGraph() + edges = [(0, 0), (0, 1), (1, 0)] + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + expected.add_edge(1, 1, weight=2) + actual = nx.from_numpy_array(A, parallel_edges=True, create_using=nx.DiGraph) + assert graphs_equal(actual, expected) + actual = nx.from_numpy_array(A, parallel_edges=False, create_using=nx.DiGraph) + assert graphs_equal(actual, expected) + # Now each integer entry in the adjacency matrix is interpreted as the + # number of parallel edges in the graph if the appropriate keyword + # argument is specified. + edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)] + expected = nx.MultiDiGraph() + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + actual = nx.from_numpy_array( + A, parallel_edges=True, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + expected = nx.MultiDiGraph() + expected.add_edges_from(set(edges), weight=1) + # The sole self-loop (edge 0) on vertex 1 should have weight 2. + expected[1][1][0]["weight"] = 2 + actual = nx.from_numpy_array( + A, parallel_edges=False, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + + @pytest.mark.parametrize( + "dt", + ( + None, # default + int, # integer dtype + np.dtype( + [("weight", "f8"), ("color", "i1")] + ), # Structured dtype with named fields + ), + ) + def test_from_numpy_array_no_edge_attr(self, dt): + A = np.array([[0, 1], [1, 0]], dtype=dt) + G = nx.from_numpy_array(A, edge_attr=None) + assert "weight" not in G.edges[0, 1] + assert len(G.edges[0, 1]) == 0 + + def test_from_numpy_array_multiedge_no_edge_attr(self): + A = np.array([[0, 2], [2, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiDiGraph, edge_attr=None) + assert all("weight" not in e for _, e in G[0][1].items()) + assert len(G[0][1][0]) == 0 + + def test_from_numpy_array_custom_edge_attr(self): + A = np.array([[0, 2], [3, 0]]) + G = nx.from_numpy_array(A, edge_attr="cost") + assert "weight" not in G.edges[0, 1] + assert G.edges[0, 1]["cost"] == 3 + + def test_symmetric(self): + """Tests that a symmetric array has edges added only once to an + undirected multigraph when using :func:`networkx.from_numpy_array`. + + """ + A = np.array([[0, 1], [1, 0]]) + G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + expected = nx.MultiGraph() + expected.add_edge(0, 1, weight=1) + assert graphs_equal(G, expected) + + def test_dtype_int_graph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. + + """ + G = nx.complete_graph(3) + A = nx.to_numpy_array(G, dtype=int) + assert A.dtype == int + + def test_dtype_int_multigraph(self): + """Test that setting dtype int actually gives an integer array. + + For more information, see GitHub pull request #1363. 
+ + """ + G = nx.MultiGraph(nx.complete_graph(3)) + A = nx.to_numpy_array(G, dtype=int) + assert A.dtype == int + + +@pytest.fixture +def multigraph_test_graph(): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=7) + G.add_edge(1, 2, weight=70) + return G + + +@pytest.mark.parametrize(("operator", "expected"), ((sum, 77), (min, 7), (max, 70))) +def test_numpy_multigraph(multigraph_test_graph, operator, expected): + A = nx.to_numpy_array(multigraph_test_graph, multigraph_weight=operator) + assert A[1, 0] == expected + + +def test_to_numpy_array_multigraph_nodelist(multigraph_test_graph): + G = multigraph_test_graph + G.add_edge(0, 1, weight=3) + A = nx.to_numpy_array(G, nodelist=[1, 2]) + assert A.shape == (2, 2) + assert A[1, 0] == 77 + + +@pytest.mark.parametrize( + "G, expected", + [ + (nx.Graph(), np.array([[0, 1 + 2j], [1 + 2j, 0]], dtype=complex)), + (nx.DiGraph(), np.array([[0, 1 + 2j], [0, 0]], dtype=complex)), + ], +) +def test_to_numpy_array_complex_weights(G, expected): + G.add_edge(0, 1, weight=1 + 2j) + A = nx.to_numpy_array(G, dtype=complex) + npt.assert_array_equal(A, expected) + + +def test_to_numpy_array_arbitrary_weights(): + G = nx.DiGraph() + w = 922337203685477580102 # Out of range for int64 + G.add_edge(0, 1, weight=922337203685477580102) # val not representable by int64 + A = nx.to_numpy_array(G, dtype=object) + expected = np.array([[0, w], [0, 0]], dtype=object) + npt.assert_array_equal(A, expected) + + # Undirected + A = nx.to_numpy_array(G.to_undirected(), dtype=object) + expected = np.array([[0, w], [w, 0]], dtype=object) + npt.assert_array_equal(A, expected) + + +@pytest.mark.parametrize( + "func, expected", + ((min, -1), (max, 10), (sum, 11), (np.mean, 11 / 3), (np.median, 2)), +) +def test_to_numpy_array_multiweight_reduction(func, expected): + """Test various functions for reducing multiedge weights.""" + G = nx.MultiDiGraph() + weights = [-1, 2, 10.0] + for w in weights: + G.add_edge(0, 1, weight=w) + A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float) + assert np.allclose(A, [[0, expected], [0, 0]]) + + # Undirected case + A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float) + assert np.allclose(A, [[0, expected], [expected, 0]]) + + +@pytest.mark.parametrize( + ("G, expected"), + [ + (nx.Graph(), [[(0, 0), (10, 5)], [(10, 5), (0, 0)]]), + (nx.DiGraph(), [[(0, 0), (10, 5)], [(0, 0), (0, 0)]]), + ], +) +def test_to_numpy_array_structured_dtype_attrs_from_fields(G, expected): + """When `dtype` is structured (i.e. 
has names) and `weight` is None, use + the named fields of the dtype to look up edge attributes.""" + G.add_edge(0, 1, weight=10, cost=5.0) + dtype = np.dtype([("weight", int), ("cost", int)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.asarray(expected, dtype=dtype) + npt.assert_array_equal(A, expected) + + +def test_to_numpy_array_structured_dtype_single_attr_default(): + G = nx.path_graph(3) + dtype = np.dtype([("weight", float)]) # A single named field + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float) + npt.assert_array_equal(A["weight"], expected) + + +@pytest.mark.parametrize( + ("field_name", "expected_attr_val"), + [ + ("weight", 1), + ("cost", 3), + ], +) +def test_to_numpy_array_structured_dtype_single_attr(field_name, expected_attr_val): + G = nx.Graph() + G.add_edge(0, 1, cost=3) + dtype = np.dtype([(field_name, float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + expected = np.array([[0, expected_attr_val], [expected_attr_val, 0]], dtype=float) + npt.assert_array_equal(A[field_name], expected) + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +@pytest.mark.parametrize( + "edge", + [ + (0, 1), # No edge attributes + (0, 1, {"weight": 10}), # One edge attr + (0, 1, {"weight": 5, "flow": -4}), # Multiple but not all edge attrs + (0, 1, {"weight": 2.0, "cost": 10, "flow": -45}), # All attrs + ], +) +def test_to_numpy_array_structured_dtype_multiple_fields(graph_type, edge): + G = graph_type([edge]) + dtype = np.dtype([("weight", float), ("cost", float), ("flow", float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None) + for attr in dtype.names: + expected = nx.to_numpy_array(G, dtype=float, weight=attr) + npt.assert_array_equal(A[attr], expected) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_to_numpy_array_structured_dtype_scalar_nonedge(G): + G.add_edge(0, 1, weight=10) + dtype = np.dtype([("weight", float), ("cost", float)]) + A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=np.nan) + for attr in dtype.names: + expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=np.nan) + npt.assert_array_equal(A[attr], expected) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_to_numpy_array_structured_dtype_nonedge_ary(G): + """Similar to the scalar case, except has a different non-edge value for + each named field.""" + G.add_edge(0, 1, weight=10) + dtype = np.dtype([("weight", float), ("cost", float)]) + nonedges = np.array([(0, np.inf)], dtype=dtype) + A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=nonedges) + for attr in dtype.names: + nonedge = nonedges[attr] + expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=nonedge) + npt.assert_array_equal(A[attr], expected) + + +def test_to_numpy_array_structured_dtype_with_weight_raises(): + """Using both a structured dtype (with named fields) and specifying a `weight` + parameter is ambiguous.""" + G = nx.path_graph(3) + dtype = np.dtype([("weight", int), ("cost", int)]) + exception_msg = "Specifying `weight` not supported for structured dtypes" + with pytest.raises(ValueError, match=exception_msg): + nx.to_numpy_array(G, dtype=dtype) # Default is weight="weight" + with pytest.raises(ValueError, match=exception_msg): + nx.to_numpy_array(G, dtype=dtype, weight="cost") + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_to_numpy_array_structured_multigraph_raises(graph_type): + G = 
nx.path_graph(3, create_using=graph_type) + dtype = np.dtype([("weight", int), ("cost", int)]) + with pytest.raises(nx.NetworkXError, match="Structured arrays are not supported"): + nx.to_numpy_array(G, dtype=dtype, weight=None) diff --git a/phivenv/Lib/site-packages/networkx/tests/test_convert_pandas.py b/phivenv/Lib/site-packages/networkx/tests/test_convert_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..ca8d08c705f142bb24232aaf63f9b3397375409e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_convert_pandas.py @@ -0,0 +1,320 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + +np = pytest.importorskip("numpy") +pd = pytest.importorskip("pandas") + + +class TestConvertPandas: + def setup_method(self): + self.rng = np.random.RandomState(seed=5) + ints = self.rng.randint(1, 11, size=(3, 2)) + a = ["A", "B", "C"] + b = ["D", "A", "E"] + df = pd.DataFrame(ints, columns=["weight", "cost"]) + df[0] = a # Column label 0 (int) + df["b"] = b # Column label 'b' (str) + self.df = df + + mdf = pd.DataFrame([[4, 16, "A", "D"]], columns=["weight", "cost", 0, "b"]) + self.mdf = pd.concat([df, mdf]) + + def test_exceptions(self): + G = pd.DataFrame(["a"]) # adj + pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G) + G = pd.DataFrame(["a", 0.0]) # elist + pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G) + df = pd.DataFrame([[1, 1], [1, 0]], dtype=int, index=[1, 2], columns=["a", "b"]) + pytest.raises(nx.NetworkXError, nx.from_pandas_adjacency, df) + + def test_from_edgelist_all_attr(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"cost": 9, "weight": 10}), + ("B", "A", {"cost": 1, "weight": 7}), + ("A", "D", {"cost": 7, "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", True) + assert graphs_equal(G, Gtrue) + # MultiGraph + MGtrue = nx.MultiGraph(Gtrue) + MGtrue.add_edge("A", "D", cost=16, weight=4) + MG = nx.from_pandas_edgelist(self.mdf, 0, "b", True, nx.MultiGraph()) + assert graphs_equal(MG, MGtrue) + + def test_from_edgelist_multi_attr(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"cost": 9, "weight": 10}), + ("B", "A", {"cost": 1, "weight": 7}), + ("A", "D", {"cost": 7, "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", ["weight", "cost"]) + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_multi_attr_incl_target(self): + Gtrue = nx.Graph( + [ + ("E", "C", {0: "C", "b": "E", "weight": 10}), + ("B", "A", {0: "B", "b": "A", "weight": 7}), + ("A", "D", {0: "A", "b": "D", "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", [0, "b", "weight"]) + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_multidigraph_and_edge_attr(self): + # example from issue #2374 + edges = [ + ("X1", "X4", {"Co": "zA", "Mi": 0, "St": "X1"}), + ("X1", "X4", {"Co": "zB", "Mi": 54, "St": "X2"}), + ("X1", "X4", {"Co": "zB", "Mi": 49, "St": "X3"}), + ("X1", "X4", {"Co": "zB", "Mi": 44, "St": "X4"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 0, "St": "Y1"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 34, "St": "Y2"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 29, "St": "X2"}), + ("Y1", "Y3", {"Co": "zC", "Mi": 24, "St": "Y3"}), + ("Z1", "Z3", {"Co": "zD", "Mi": 0, "St": "Z1"}), + ("Z1", "Z3", {"Co": "zD", "Mi": 14, "St": "X3"}), + ] + Gtrue = nx.MultiDiGraph(edges) + data = { + "O": ["X1", "X1", "X1", "X1", "Y1", "Y1", "Y1", "Y1", "Z1", "Z1"], + "D": ["X4", "X4", "X4", "X4", "Y3", "Y3", "Y3", "Y3", "Z3", "Z3"], + "St": ["X1", "X2", "X3", "X4", "Y1", "Y2", "X2", "Y3", "Z1", 
"X3"], + "Co": ["zA", "zB", "zB", "zB", "zC", "zC", "zC", "zC", "zD", "zD"], + "Mi": [0, 54, 49, 44, 0, 34, 29, 24, 0, 14], + } + df = pd.DataFrame.from_dict(data) + G1 = nx.from_pandas_edgelist( + df, source="O", target="D", edge_attr=True, create_using=nx.MultiDiGraph + ) + G2 = nx.from_pandas_edgelist( + df, + source="O", + target="D", + edge_attr=["St", "Co", "Mi"], + create_using=nx.MultiDiGraph, + ) + assert graphs_equal(G1, Gtrue) + assert graphs_equal(G2, Gtrue) + + def test_from_edgelist_one_attr(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"weight": 10}), + ("B", "A", {"weight": 7}), + ("A", "D", {"weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", "weight") + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_int_attr_name(self): + # note: this also tests that edge_attr can be `source` + Gtrue = nx.Graph( + [("E", "C", {0: "C"}), ("B", "A", {0: "B"}), ("A", "D", {0: "A"})] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", 0) + assert graphs_equal(G, Gtrue) + + def test_from_edgelist_invalid_attr(self): + pytest.raises( + nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", "misspell" + ) + pytest.raises(nx.NetworkXError, nx.from_pandas_edgelist, self.df, 0, "b", 1) + # see Issue #3562 + edgeframe = pd.DataFrame([[0, 1], [1, 2], [2, 0]], columns=["s", "t"]) + pytest.raises( + nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", True + ) + pytest.raises( + nx.NetworkXError, nx.from_pandas_edgelist, edgeframe, "s", "t", "weight" + ) + pytest.raises( + nx.NetworkXError, + nx.from_pandas_edgelist, + edgeframe, + "s", + "t", + ["weight", "size"], + ) + + def test_from_edgelist_no_attr(self): + Gtrue = nx.Graph([("E", "C", {}), ("B", "A", {}), ("A", "D", {})]) + G = nx.from_pandas_edgelist(self.df, 0, "b") + assert graphs_equal(G, Gtrue) + + def test_from_edgelist(self): + # Pandas DataFrame + G = nx.cycle_graph(10) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges)) + + edgelist = nx.to_edgelist(G) + source = [s for s, t, d in edgelist] + target = [t for s, t, d in edgelist] + weight = [d["weight"] for s, t, d in edgelist] + edges = pd.DataFrame({"source": source, "target": target, "weight": weight}) + + GG = nx.from_pandas_edgelist(edges, edge_attr="weight") + assert nodes_equal(G.nodes(), GG.nodes()) + assert edges_equal(G.edges(), GG.edges()) + GW = nx.to_networkx_graph(edges, create_using=nx.Graph) + assert nodes_equal(G.nodes(), GW.nodes()) + assert edges_equal(G.edges(), GW.edges()) + + def test_to_edgelist_default_source_or_target_col_exists(self): + G = nx.path_graph(10) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges)) + nx.set_edge_attributes(G, 0, name="source") + pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G) + + # drop source column to test an exception raised for the target column + for u, v, d in G.edges(data=True): + d.pop("source", None) + + nx.set_edge_attributes(G, 0, name="target") + pytest.raises(nx.NetworkXError, nx.to_pandas_edgelist, G) + + def test_to_edgelist_custom_source_or_target_col_exists(self): + G = nx.path_graph(10) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges)) + nx.set_edge_attributes(G, 0, name="source_col_name") + pytest.raises( + nx.NetworkXError, nx.to_pandas_edgelist, G, source="source_col_name" + ) + + # drop source column to test an exception raised for the target column + for u, v, d in G.edges(data=True): + d.pop("source_col_name", None) + + nx.set_edge_attributes(G, 0, name="target_col_name") + pytest.raises( + nx.NetworkXError, 
nx.to_pandas_edgelist, G, target="target_col_name" + ) + + def test_to_edgelist_edge_key_col_exists(self): + G = nx.path_graph(10, create_using=nx.MultiGraph) + G.add_weighted_edges_from((u, v, u) for u, v in list(G.edges())) + nx.set_edge_attributes(G, 0, name="edge_key_name") + pytest.raises( + nx.NetworkXError, nx.to_pandas_edgelist, G, edge_key="edge_key_name" + ) + + def test_from_adjacency(self): + nodelist = [1, 2] + dftrue = pd.DataFrame( + [[1, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist + ) + G = nx.Graph([(1, 1), (1, 2)]) + df = nx.to_pandas_adjacency(G, dtype=int) + pd.testing.assert_frame_equal(df, dftrue) + + @pytest.mark.parametrize("graph", [nx.Graph, nx.MultiGraph]) + def test_roundtrip(self, graph): + # edgelist + Gtrue = graph([(1, 1), (1, 2)]) + df = nx.to_pandas_edgelist(Gtrue) + G = nx.from_pandas_edgelist(df, create_using=graph) + assert graphs_equal(Gtrue, G) + # adjacency + adj = {1: {1: {"weight": 1}, 2: {"weight": 1}}, 2: {1: {"weight": 1}}} + Gtrue = graph(adj) + df = nx.to_pandas_adjacency(Gtrue, dtype=int) + G = nx.from_pandas_adjacency(df, create_using=graph) + assert graphs_equal(Gtrue, G) + + def test_from_adjacency_named(self): + # example from issue #3105 + data = { + "A": {"A": 0, "B": 0, "C": 0}, + "B": {"A": 1, "B": 0, "C": 0}, + "C": {"A": 0, "B": 1, "C": 0}, + } + dftrue = pd.DataFrame(data, dtype=np.intp) + df = dftrue[["A", "C", "B"]] + G = nx.from_pandas_adjacency(df, create_using=nx.DiGraph()) + df = nx.to_pandas_adjacency(G, dtype=np.intp) + pd.testing.assert_frame_equal(df, dftrue) + + def test_edgekey_with_multigraph(self): + df = pd.DataFrame( + { + "source": {"A": "N1", "B": "N2", "C": "N1", "D": "N1"}, + "target": {"A": "N2", "B": "N3", "C": "N1", "D": "N2"}, + "attr1": {"A": "F1", "B": "F2", "C": "F3", "D": "F4"}, + "attr2": {"A": 1, "B": 0, "C": 0, "D": 0}, + "attr3": {"A": 0, "B": 1, "C": 0, "D": 1}, + } + ) + Gtrue = nx.MultiGraph( + [ + ("N1", "N2", "F1", {"attr2": 1, "attr3": 0}), + ("N2", "N3", "F2", {"attr2": 0, "attr3": 1}), + ("N1", "N1", "F3", {"attr2": 0, "attr3": 0}), + ("N1", "N2", "F4", {"attr2": 0, "attr3": 1}), + ] + ) + # example from issue #4065 + G = nx.from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=["attr2", "attr3"], + edge_key="attr1", + create_using=nx.MultiGraph(), + ) + assert graphs_equal(G, Gtrue) + + df_roundtrip = nx.to_pandas_edgelist(G, edge_key="attr1") + df_roundtrip = df_roundtrip.sort_values("attr1") + df_roundtrip.index = ["A", "B", "C", "D"] + pd.testing.assert_frame_equal( + df, df_roundtrip[["source", "target", "attr1", "attr2", "attr3"]] + ) + + def test_edgekey_with_normal_graph_no_action(self): + Gtrue = nx.Graph( + [ + ("E", "C", {"cost": 9, "weight": 10}), + ("B", "A", {"cost": 1, "weight": 7}), + ("A", "D", {"cost": 7, "weight": 4}), + ] + ) + G = nx.from_pandas_edgelist(self.df, 0, "b", True, edge_key="weight") + assert graphs_equal(G, Gtrue) + + def test_nonexisting_edgekey_raises(self): + with pytest.raises(nx.exception.NetworkXError): + nx.from_pandas_edgelist( + self.df, + source="source", + target="target", + edge_key="Not_real", + edge_attr=True, + create_using=nx.MultiGraph(), + ) + + +def test_to_pandas_adjacency_with_nodelist(): + G = nx.complete_graph(5) + nodelist = [1, 4] + expected = pd.DataFrame( + [[0, 1], [1, 0]], dtype=int, index=nodelist, columns=nodelist + ) + pd.testing.assert_frame_equal( + expected, nx.to_pandas_adjacency(G, nodelist, dtype=int) + ) + + +def test_to_pandas_edgelist_with_nodelist(): + G = nx.Graph() + 
G.add_edges_from([(0, 1), (1, 2), (1, 3)], weight=2.0) + G.add_edge(0, 5, weight=100) + df = nx.to_pandas_edgelist(G, nodelist=[1, 2]) + assert 0 not in df["source"].to_numpy() + assert 100 not in df["weight"].to_numpy() diff --git a/phivenv/Lib/site-packages/networkx/tests/test_convert_scipy.py b/phivenv/Lib/site-packages/networkx/tests/test_convert_scipy.py new file mode 100644 index 0000000000000000000000000000000000000000..aa513b859a3d697a6e342164c7d0b3eca8c93d4e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_convert_scipy.py @@ -0,0 +1,282 @@ +import pytest + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + +import networkx as nx +from networkx.generators.classic import barbell_graph, cycle_graph, path_graph +from networkx.utils import graphs_equal + + +class TestConvertScipy: + def setup_method(self): + self.G1 = barbell_graph(10, 3) + self.G2 = cycle_graph(10, create_using=nx.DiGraph) + + self.G3 = self.create_weighted(nx.Graph()) + self.G4 = self.create_weighted(nx.DiGraph()) + + def test_exceptions(self): + class G: + format = None + + pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G) + + def create_weighted(self, G): + g = cycle_graph(4) + e = list(g.edges()) + source = [u for u, v in e] + dest = [v for u, v in e] + weight = [s + 10 for s in source] + ex = zip(source, dest, weight) + G.add_weighted_edges_from(ex) + return G + + def identity_conversion(self, G, A, create_using): + GG = nx.from_scipy_sparse_array(A, create_using=create_using) + assert nx.is_isomorphic(G, GG) + + GW = nx.to_networkx_graph(A, create_using=create_using) + assert nx.is_isomorphic(G, GW) + + GI = nx.empty_graph(0, create_using).__class__(A) + assert nx.is_isomorphic(G, GI) + + ACSR = A.tocsr() + GI = nx.empty_graph(0, create_using).__class__(ACSR) + assert nx.is_isomorphic(G, GI) + + ACOO = A.tocoo() + GI = nx.empty_graph(0, create_using).__class__(ACOO) + assert nx.is_isomorphic(G, GI) + + ACSC = A.tocsc() + GI = nx.empty_graph(0, create_using).__class__(ACSC) + assert nx.is_isomorphic(G, GI) + + AD = A.todense() + GI = nx.empty_graph(0, create_using).__class__(AD) + assert nx.is_isomorphic(G, GI) + + AA = A.toarray() + GI = nx.empty_graph(0, create_using).__class__(AA) + assert nx.is_isomorphic(G, GI) + + def test_shape(self): + "Conversion from non-square sparse array." + A = sp.sparse.lil_array([[1, 2, 3], [4, 5, 6]]) + pytest.raises(nx.NetworkXError, nx.from_scipy_sparse_array, A) + + def test_identity_graph_matrix(self): + "Conversion from graph to sparse matrix to graph." + A = nx.to_scipy_sparse_array(self.G1) + self.identity_conversion(self.G1, A, nx.Graph()) + + def test_identity_digraph_matrix(self): + "Conversion from digraph to sparse matrix to digraph." 
+ A = nx.to_scipy_sparse_array(self.G2) + self.identity_conversion(self.G2, A, nx.DiGraph()) + + def test_identity_weighted_graph_matrix(self): + """Conversion from weighted graph to sparse matrix to weighted graph.""" + A = nx.to_scipy_sparse_array(self.G3) + self.identity_conversion(self.G3, A, nx.Graph()) + + def test_identity_weighted_digraph_matrix(self): + """Conversion from weighted digraph to sparse matrix to weighted digraph.""" + A = nx.to_scipy_sparse_array(self.G4) + self.identity_conversion(self.G4, A, nx.DiGraph()) + + def test_nodelist(self): + """Conversion from graph to sparse matrix to graph with nodelist.""" + P4 = path_graph(4) + P3 = path_graph(3) + nodelist = list(P3.nodes()) + A = nx.to_scipy_sparse_array(P4, nodelist=nodelist) + GA = nx.Graph(A) + assert nx.is_isomorphic(GA, P3) + + pytest.raises(nx.NetworkXError, nx.to_scipy_sparse_array, P3, nodelist=[]) + # Test nodelist duplicates. + long_nl = nodelist + [0] + pytest.raises(nx.NetworkXError, nx.to_scipy_sparse_array, P3, nodelist=long_nl) + + # Test nodelist contains non-nodes + non_nl = [-1, 0, 1, 2] + pytest.raises(nx.NetworkXError, nx.to_scipy_sparse_array, P3, nodelist=non_nl) + + def test_weight_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = path_graph(4) + A = nx.to_scipy_sparse_array(P4) + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + np.testing.assert_equal( + 0.5 * A.todense(), nx.to_scipy_sparse_array(WP4).todense() + ) + np.testing.assert_equal( + 0.3 * A.todense(), nx.to_scipy_sparse_array(WP4, weight="other").todense() + ) + + def test_format_keyword(self): + WP4 = nx.Graph() + WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3)) + P4 = path_graph(4) + A = nx.to_scipy_sparse_array(P4, format="csr") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="csc") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="coo") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="bsr") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="lil") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="dia") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + A = nx.to_scipy_sparse_array(P4, format="dok") + np.testing.assert_equal( + A.todense(), nx.to_scipy_sparse_array(WP4, weight=None).todense() + ) + + def test_format_keyword_raise(self): + with pytest.raises(nx.NetworkXError): + WP4 = nx.Graph() + WP4.add_edges_from( + (n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3) + ) + P4 = path_graph(4) + nx.to_scipy_sparse_array(P4, format="any_other") + + def test_null_raise(self): + with pytest.raises(nx.NetworkXError): + nx.to_scipy_sparse_array(nx.Graph()) + + def test_empty(self): + G = nx.Graph() + G.add_node(1) + M = nx.to_scipy_sparse_array(G) + np.testing.assert_equal(M.toarray(), np.array([[0]])) + + def test_ordering(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(3, 1) + M = nx.to_scipy_sparse_array(G, nodelist=[3, 2, 1]) + 
np.testing.assert_equal( + M.toarray(), np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]) + ) + + def test_selfloop_graph(self): + G = nx.Graph([(1, 1)]) + M = nx.to_scipy_sparse_array(G) + np.testing.assert_equal(M.toarray(), np.array([[1]])) + + G.add_edges_from([(2, 3), (3, 4)]) + M = nx.to_scipy_sparse_array(G, nodelist=[2, 3, 4]) + np.testing.assert_equal( + M.toarray(), np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) + ) + + def test_selfloop_digraph(self): + G = nx.DiGraph([(1, 1)]) + M = nx.to_scipy_sparse_array(G) + np.testing.assert_equal(M.toarray(), np.array([[1]])) + + G.add_edges_from([(2, 3), (3, 4)]) + M = nx.to_scipy_sparse_array(G, nodelist=[2, 3, 4]) + np.testing.assert_equal( + M.toarray(), np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]]) + ) + + def test_from_scipy_sparse_array_parallel_edges(self): + """Tests that the :func:`networkx.from_scipy_sparse_array` function + interprets integer weights as the number of parallel edges when + creating a multigraph. + + """ + A = sp.sparse.csr_array([[1, 1], [1, 2]]) + # First, with a simple graph, each integer entry in the adjacency + # matrix is interpreted as the weight of a single edge in the graph. + expected = nx.DiGraph() + edges = [(0, 0), (0, 1), (1, 0)] + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + expected.add_edge(1, 1, weight=2) + actual = nx.from_scipy_sparse_array( + A, parallel_edges=True, create_using=nx.DiGraph + ) + assert graphs_equal(actual, expected) + actual = nx.from_scipy_sparse_array( + A, parallel_edges=False, create_using=nx.DiGraph + ) + assert graphs_equal(actual, expected) + # Now each integer entry in the adjacency matrix is interpreted as the + # number of parallel edges in the graph if the appropriate keyword + # argument is specified. + edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)] + expected = nx.MultiDiGraph() + expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges]) + actual = nx.from_scipy_sparse_array( + A, parallel_edges=True, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + expected = nx.MultiDiGraph() + expected.add_edges_from(set(edges), weight=1) + # The sole self-loop (edge 0) on vertex 1 should have weight 2. + expected[1][1][0]["weight"] = 2 + actual = nx.from_scipy_sparse_array( + A, parallel_edges=False, create_using=nx.MultiDiGraph + ) + assert graphs_equal(actual, expected) + + def test_symmetric(self): + """Tests that a symmetric matrix has edges added only once to an + undirected multigraph when using + :func:`networkx.from_scipy_sparse_array`. 
+ + """ + A = sp.sparse.csr_array([[0, 1], [1, 0]]) + G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + expected = nx.MultiGraph() + expected.add_edge(0, 1, weight=1) + assert graphs_equal(G, expected) + + +@pytest.mark.parametrize("sparse_format", ("csr", "csc", "dok")) +def test_from_scipy_sparse_array_formats(sparse_format): + """Test all formats supported by _generate_weighted_edges.""" + # trinode complete graph with non-uniform edge weights + expected = nx.Graph() + expected.add_edges_from( + [ + (0, 1, {"weight": 3}), + (0, 2, {"weight": 2}), + (1, 0, {"weight": 3}), + (1, 2, {"weight": 1}), + (2, 0, {"weight": 2}), + (2, 1, {"weight": 1}), + ] + ) + A = sp.sparse.coo_array([[0, 3, 2], [3, 0, 1], [2, 1, 0]]).asformat(sparse_format) + assert graphs_equal(expected, nx.from_scipy_sparse_array(A)) diff --git a/phivenv/Lib/site-packages/networkx/tests/test_exceptions.py b/phivenv/Lib/site-packages/networkx/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..cf59983cb8d12a119f5744ebc8b11e7cb9075366 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_exceptions.py @@ -0,0 +1,40 @@ +import pytest + +import networkx as nx + +# smoke tests for exceptions + + +def test_raises_networkxexception(): + with pytest.raises(nx.NetworkXException): + raise nx.NetworkXException + + +def test_raises_networkxerr(): + with pytest.raises(nx.NetworkXError): + raise nx.NetworkXError + + +def test_raises_networkx_pointless_concept(): + with pytest.raises(nx.NetworkXPointlessConcept): + raise nx.NetworkXPointlessConcept + + +def test_raises_networkxalgorithmerr(): + with pytest.raises(nx.NetworkXAlgorithmError): + raise nx.NetworkXAlgorithmError + + +def test_raises_networkx_unfeasible(): + with pytest.raises(nx.NetworkXUnfeasible): + raise nx.NetworkXUnfeasible + + +def test_raises_networkx_no_path(): + with pytest.raises(nx.NetworkXNoPath): + raise nx.NetworkXNoPath + + +def test_raises_networkx_unbounded(): + with pytest.raises(nx.NetworkXUnbounded): + raise nx.NetworkXUnbounded diff --git a/phivenv/Lib/site-packages/networkx/tests/test_import.py b/phivenv/Lib/site-packages/networkx/tests/test_import.py new file mode 100644 index 0000000000000000000000000000000000000000..32aafdf2a4dafc85cee088138590b84f4c627b5e --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_import.py @@ -0,0 +1,11 @@ +import pytest + + +def test_namespace_alias(): + with pytest.raises(ImportError): + from networkx import nx + + +def test_namespace_nesting(): + with pytest.raises(ImportError): + from networkx import networkx diff --git a/phivenv/Lib/site-packages/networkx/tests/test_lazy_imports.py b/phivenv/Lib/site-packages/networkx/tests/test_lazy_imports.py new file mode 100644 index 0000000000000000000000000000000000000000..9b7f1b1d94c08b7185ae1798d60170f3b2cdc7a1 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_lazy_imports.py @@ -0,0 +1,97 @@ +import importlib +import sys +import types + +import pytest + +import networkx.lazy_imports as lazy + + +def test_lazy_import_basics(): + math = lazy._lazy_import("math") + anything_not_real = lazy._lazy_import("anything_not_real") + + # Now test that accessing attributes does what it should + assert math.sin(math.pi) == pytest.approx(0, 1e-6) + # poor-mans pytest.raises for testing errors on attribute access + try: + anything_not_real.pi + assert False # Should not get here + except ModuleNotFoundError: + pass + assert isinstance(anything_not_real, lazy.DelayedImportErrorModule) + # see if 
it changes for second access + try: + anything_not_real.pi + assert False # Should not get here + except ModuleNotFoundError: + pass + + +def test_lazy_import_impact_on_sys_modules(): + math = lazy._lazy_import("math") + anything_not_real = lazy._lazy_import("anything_not_real") + + assert type(math) == types.ModuleType + assert "math" in sys.modules + assert type(anything_not_real) == lazy.DelayedImportErrorModule + assert "anything_not_real" not in sys.modules + + # only do this if numpy is installed + np_test = pytest.importorskip("numpy") + np = lazy._lazy_import("numpy") + assert type(np) == types.ModuleType + assert "numpy" in sys.modules + + np.pi # trigger load of numpy + + assert type(np) == types.ModuleType + assert "numpy" in sys.modules + + +def test_lazy_import_nonbuiltins(): + sp = lazy._lazy_import("scipy") + np = lazy._lazy_import("numpy") + if isinstance(sp, lazy.DelayedImportErrorModule): + try: + sp.special.erf + assert False + except ModuleNotFoundError: + pass + elif isinstance(np, lazy.DelayedImportErrorModule): + try: + np.sin(np.pi) + assert False + except ModuleNotFoundError: + pass + else: + assert sp.special.erf(np.pi) == pytest.approx(1, 1e-4) + + +def test_lazy_attach(): + name = "mymod" + submods = ["mysubmodule", "anothersubmodule"] + myall = {"not_real_submod": ["some_var_or_func"]} + + locls = { + "attach": lazy.attach, + "name": name, + "submods": submods, + "myall": myall, + } + s = "__getattr__, __lazy_dir__, __all__ = attach(name, submods, myall)" + + exec(s, {}, locls) + expected = { + "attach": lazy.attach, + "name": name, + "submods": submods, + "myall": myall, + "__getattr__": None, + "__lazy_dir__": None, + "__all__": None, + } + assert locls.keys() == expected.keys() + for k, v in expected.items(): + if v is not None: + assert locls[k] == v diff --git a/phivenv/Lib/site-packages/networkx/tests/test_relabel.py b/phivenv/Lib/site-packages/networkx/tests/test_relabel.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebf4d3ef490afce48e3e1298412edb05a385cdc --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/tests/test_relabel.py @@ -0,0 +1,347 @@ +import pytest + +import networkx as nx +from networkx.generators.classic import empty_graph +from networkx.utils import edges_equal, nodes_equal + + +class TestRelabel: + def test_convert_node_labels_to_integers(self): + # test that empty graph converts fine for all options + G = empty_graph() + H = nx.convert_node_labels_to_integers(G, 100) + assert list(H.nodes()) == [] + assert list(H.edges()) == [] + + for opt in ["default", "sorted", "increasing degree", "decreasing degree"]: + G = empty_graph() + H = nx.convert_node_labels_to_integers(G, 100, ordering=opt) + assert list(H.nodes()) == [] + assert list(H.edges()) == [] + + G = empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + H = nx.convert_node_labels_to_integers(G) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + + H = nx.convert_node_labels_to_integers(G, 1000) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert nodes_equal(H.nodes(), [1000, 1001, 1002, 1003]) + + H = nx.convert_node_labels_to_integers(G, ordering="increasing degree") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 1 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 3 + + H = 
nx.convert_node_labels_to_integers(G, ordering="decreasing degree") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 3 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 1 + + H = nx.convert_node_labels_to_integers( + G, ordering="increasing degree", label_attribute="label" + ) + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + assert H.degree(0) == 1 + assert H.degree(1) == 2 + assert H.degree(2) == 2 + assert H.degree(3) == 3 + + # check mapping + assert H.nodes[3]["label"] == "C" + assert H.nodes[0]["label"] == "D" + assert H.nodes[1]["label"] == "A" or H.nodes[2]["label"] == "A" + assert H.nodes[1]["label"] == "B" or H.nodes[2]["label"] == "B" + + def test_convert_to_integers2(self): + G = empty_graph() + G.add_edges_from([("C", "D"), ("A", "B"), ("A", "C"), ("B", "C")]) + H = nx.convert_node_labels_to_integers(G, ordering="sorted") + degH = (d for n, d in H.degree()) + degG = (d for n, d in G.degree()) + assert sorted(degH) == sorted(degG) + + H = nx.convert_node_labels_to_integers( + G, ordering="sorted", label_attribute="label" + ) + assert H.nodes[0]["label"] == "A" + assert H.nodes[1]["label"] == "B" + assert H.nodes[2]["label"] == "C" + assert H.nodes[3]["label"] == "D" + + def test_convert_to_integers_raise(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.convert_node_labels_to_integers(G, ordering="increasing age") + + def test_relabel_nodes_copy(self): + G = nx.empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_function(self): + G = nx.empty_graph() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + # function mapping no longer encouraged but works + + def mapping(n): + return ord(n) + + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), [65, 66, 67, 68]) + + def test_relabel_nodes_callable_type(self): + G = nx.path_graph(4) + H = nx.relabel_nodes(G, str) + assert nodes_equal(H.nodes, ["0", "1", "2", "3"]) + + @pytest.mark.parametrize("non_mc", ("0123", ["0", "1", "2", "3"])) + def test_relabel_nodes_non_mapping_or_callable(self, non_mc): + """If `mapping` is neither a Callable or a Mapping, an exception + should be raised.""" + G = nx.path_graph(4) + with pytest.raises(AttributeError): + nx.relabel_nodes(G, non_mc) + + def test_relabel_nodes_graph(self): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_orderedgraph(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + G.add_edges_from([(1, 3), (2, 3)]) + mapping = {1: "a", 2: "b", 3: "c"} + H = nx.relabel_nodes(G, mapping) + assert list(H.nodes) == ["a", "b", "c"] + + def test_relabel_nodes_digraph(self): + G = nx.DiGraph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {"A": "aardvark", "B": "bear", "C": "cat", "D": "dog"} + H = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(H.nodes(), ["aardvark", "bear", "cat", "dog"]) + + def test_relabel_nodes_multigraph(self): + G = nx.MultiGraph([("a", "b"), ("a", "b")]) + mapping = {"a": 
"aardvark", "b": "bear"} + G = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes(), ["aardvark", "bear"]) + assert edges_equal(G.edges(), [("aardvark", "bear"), ("aardvark", "bear")]) + + def test_relabel_nodes_multidigraph(self): + G = nx.MultiDiGraph([("a", "b"), ("a", "b")]) + mapping = {"a": "aardvark", "b": "bear"} + G = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes(), ["aardvark", "bear"]) + assert edges_equal(G.edges(), [("aardvark", "bear"), ("aardvark", "bear")]) + + def test_relabel_isolated_nodes_to_same(self): + G = nx.Graph() + G.add_nodes_from(range(4)) + mapping = {1: 1} + H = nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(H.nodes(), list(range(4))) + + def test_relabel_nodes_missing(self): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + mapping = {0: "aardvark"} + # copy=True + H = nx.relabel_nodes(G, mapping, copy=True) + assert nodes_equal(H.nodes, G.nodes) + # copy=False + GG = G.copy() + nx.relabel_nodes(G, mapping, copy=False) + assert nodes_equal(G.nodes, GG.nodes) + + def test_relabel_copy_name(self): + G = nx.Graph() + H = nx.relabel_nodes(G, {}, copy=True) + assert H.graph == G.graph + H = nx.relabel_nodes(G, {}, copy=False) + assert H.graph == G.graph + G.name = "first" + H = nx.relabel_nodes(G, {}, copy=True) + assert H.graph == G.graph + H = nx.relabel_nodes(G, {}, copy=False) + assert H.graph == G.graph + + def test_relabel_toposort(self): + K5 = nx.complete_graph(4) + G = nx.complete_graph(4) + G = nx.relabel_nodes(G, {i: i + 1 for i in range(4)}, copy=False) + assert nx.is_isomorphic(K5, G) + G = nx.complete_graph(4) + G = nx.relabel_nodes(G, {i: i - 1 for i in range(4)}, copy=False) + assert nx.is_isomorphic(K5, G) + + def test_relabel_selfloop(self): + G = nx.DiGraph([(1, 1), (1, 2), (2, 3)]) + G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False) + assert nodes_equal(G.nodes(), ["One", "Three", "Two"]) + G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)]) + G = nx.relabel_nodes(G, {1: "One", 2: "Two", 3: "Three"}, copy=False) + assert nodes_equal(G.nodes(), ["One", "Three", "Two"]) + G = nx.MultiDiGraph([(1, 1)]) + G = nx.relabel_nodes(G, {1: 0}, copy=False) + assert nodes_equal(G.nodes(), [0]) + + def test_relabel_multidigraph_inout_merge_nodes(self): + for MG in (nx.MultiGraph, nx.MultiDiGraph): + for cc in (True, False): + G = MG([(0, 4), (1, 4), (4, 2), (4, 3)]) + G[0][4][0]["value"] = "a" + G[1][4][0]["value"] = "b" + G[4][2][0]["value"] = "c" + G[4][3][0]["value"] = "d" + G.add_edge(0, 4, key="x", value="e") + G.add_edge(4, 3, key="x", value="f") + mapping = {0: 9, 1: 9, 2: 9, 3: 9} + H = nx.relabel_nodes(G, mapping, copy=cc) + # No ordering on keys enforced + assert {"value": "a"} in H[9][4].values() + assert {"value": "b"} in H[9][4].values() + assert {"value": "c"} in H[4][9].values() + assert len(H[4][9]) == 3 if G.is_directed() else 6 + assert {"value": "d"} in H[4][9].values() + assert {"value": "e"} in H[9][4].values() + assert {"value": "f"} in H[4][9].values() + assert len(H[9][4]) == 3 if G.is_directed() else 6 + + def test_relabel_multigraph_merge_inplace(self): + G = nx.MultiGraph([(0, 1), (0, 2), (0, 3), (0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + nx.relabel_nodes(G, mapping, copy=False) + # No ordering on keys enforced + assert {"value": "a"} in G[0][4].values() + assert {"value": "b"} in G[0][4].values() + assert {"value": "c"} in G[0][4].values() + + 
def test_relabel_multidigraph_merge_inplace(self): + G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + nx.relabel_nodes(G, mapping, copy=False) + # No ordering on keys enforced + assert {"value": "a"} in G[0][4].values() + assert {"value": "b"} in G[0][4].values() + assert {"value": "c"} in G[0][4].values() + + def test_relabel_multidigraph_inout_copy(self): + G = nx.MultiDiGraph([(0, 4), (1, 4), (4, 2), (4, 3)]) + G[0][4][0]["value"] = "a" + G[1][4][0]["value"] = "b" + G[4][2][0]["value"] = "c" + G[4][3][0]["value"] = "d" + G.add_edge(0, 4, key="x", value="e") + G.add_edge(4, 3, key="x", value="f") + mapping = {0: 9, 1: 9, 2: 9, 3: 9} + H = nx.relabel_nodes(G, mapping, copy=True) + # No ordering on keys enforced + assert {"value": "a"} in H[9][4].values() + assert {"value": "b"} in H[9][4].values() + assert {"value": "c"} in H[4][9].values() + assert len(H[4][9]) == 3 + assert {"value": "d"} in H[4][9].values() + assert {"value": "e"} in H[9][4].values() + assert {"value": "f"} in H[4][9].values() + assert len(H[9][4]) == 3 + + def test_relabel_multigraph_merge_copy(self): + G = nx.MultiGraph([(0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + H = nx.relabel_nodes(G, mapping, copy=True) + assert {"value": "a"} in H[0][4].values() + assert {"value": "b"} in H[0][4].values() + assert {"value": "c"} in H[0][4].values() + + def test_relabel_multidigraph_merge_copy(self): + G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)]) + G[0][1][0]["value"] = "a" + G[0][2][0]["value"] = "b" + G[0][3][0]["value"] = "c" + mapping = {1: 4, 2: 4, 3: 4} + H = nx.relabel_nodes(G, mapping, copy=True) + assert {"value": "a"} in H[0][4].values() + assert {"value": "b"} in H[0][4].values() + assert {"value": "c"} in H[0][4].values() + + def test_relabel_multigraph_nonnumeric_key(self): + for MG in (nx.MultiGraph, nx.MultiDiGraph): + for cc in (True, False): + G = MG() + G.add_edge(0, 1, key="I", value="a") + G.add_edge(0, 2, key="II", value="b") + G.add_edge(0, 3, key="II", value="c") + mapping = {1: 4, 2: 4, 3: 4} + G = nx.relabel_nodes(G, mapping, copy=cc) + assert {"value": "a"} in G[0][4].values() + assert {"value": "b"} in G[0][4].values() + assert {"value": "c"} in G[0][4].values() + assert 0 in G[0][4] + assert "I" in G[0][4] + assert "II" in G[0][4] + + def test_relabel_circular(self): + G = nx.path_graph(3) + mapping = {0: 1, 1: 0} + H = nx.relabel_nodes(G, mapping, copy=True) + with pytest.raises(nx.NetworkXUnfeasible): + H = nx.relabel_nodes(G, mapping, copy=False) + + def test_relabel_preserve_node_order_full_mapping_with_copy_true(self): + G = nx.path_graph(3) + original_order = list(G.nodes()) + mapping = {2: "a", 1: "b", 0: "c"} # dictionary keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=True) + new_order = list(H.nodes()) + assert [mapping.get(i, i) for i in original_order] == new_order + + def test_relabel_preserve_node_order_full_mapping_with_copy_false(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {2: "a", 1: "b", 0: "c"} # dictionary keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=False) + new_order = list(H) + assert [mapping.get(i, i) for i in original_order] == new_order + + def test_relabel_preserve_node_order_partial_mapping_with_copy_true(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {1: "a", 0:
"b"} # partial mapping and keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=True) + new_order = list(H) + assert [mapping.get(i, i) for i in original_order] == new_order + + def test_relabel_preserve_node_order_partial_mapping_with_copy_false(self): + G = nx.path_graph(3) + original_order = list(G) + mapping = {1: "a", 0: "b"} # partial mapping and keys out of order on purpose + H = nx.relabel_nodes(G, mapping, copy=False) + new_order = list(H) + assert [mapping.get(i, i) for i in original_order] != new_order diff --git a/phivenv/Lib/site-packages/networkx/utils/__init__.py b/phivenv/Lib/site-packages/networkx/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..48f02c18873c94098ec234cdc39ca3c8cf0a5833 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/__init__.py @@ -0,0 +1,6 @@ +from networkx.utils.misc import * +from networkx.utils.decorators import * +from networkx.utils.random_sequence import * +from networkx.utils.union_find import * +from networkx.utils.rcm import * +from networkx.utils.heaps import * diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53ec04481ad32e35f2b7005fafe717c0888e7812 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/backends.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/backends.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e24a2a766188707fb070e6bfd3b62176035dde6 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/backends.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/decorators.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/decorators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8edeff8885378ee15f2182322e23e23cc99ecdcf Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/decorators.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/heaps.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/heaps.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1887710458d1f32d2a04493e198b880b3eeb5ab3 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/heaps.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee8b1563bc51ceaea38402ce30ccf062cbcd4751 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/mapped_queue.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/misc.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2df647536279bf5476e6515cfbeda74a93b65da Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/misc.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/random_sequence.cpython-39.pyc 
b/phivenv/Lib/site-packages/networkx/utils/__pycache__/random_sequence.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36e05842818dca557db6a4508ca66e491bb9ff60 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/random_sequence.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/rcm.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/rcm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84ecf6d2ee103c47dfcc702b250846ff80ec5fe9 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/rcm.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/__pycache__/union_find.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/__pycache__/union_find.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e10ec1fa6dd26a15e14d3f87fb308d33bdb7e01 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/__pycache__/union_find.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/backends.py b/phivenv/Lib/site-packages/networkx/utils/backends.py new file mode 100644 index 0000000000000000000000000000000000000000..3bafa0f692279c015af738fe982ec37f3ee17b01 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/backends.py @@ -0,0 +1,975 @@ +""" +Code to support various backends in a plugin dispatch architecture. + +Create a Dispatcher +------------------- + +To be a valid backend, a package must register an entry_point +of `networkx.backends` with a key pointing to the handler. + +For example:: + + entry_points={'networkx.backends': 'sparse = networkx_backend_sparse'} + +The backend must create a Graph-like object which contains an attribute +``__networkx_backend__`` with a value of the entry point name. + +Continuing the example above:: + + class WrappedSparse: + __networkx_backend__ = "sparse" + ... + +When a dispatchable NetworkX algorithm encounters a Graph-like object +with a ``__networkx_backend__`` attribute, it will look for the associated +dispatch object in the entry_points, load it, and dispatch the work to it. + + +Testing +------- +To assist in validating the backend algorithm implementations, if an +environment variable ``NETWORKX_TEST_BACKEND`` is set to a registered +backend key, the dispatch machinery will automatically convert regular +networkx Graphs and DiGraphs to the backend equivalent by calling +``.convert_from_nx(G, edge_attrs=edge_attrs, name=name)``. +Set ``NETWORKX_FALLBACK_TO_NX`` environment variable to have tests +use networkx graphs for algorithms not implemented by the backend. + +The arguments to ``convert_from_nx`` are: + +- ``G`` : networkx Graph +- ``edge_attrs`` : dict, optional + Dict that maps edge attributes to default values if missing in ``G``. + If None, then no edge attributes will be converted and default may be 1. +- ``node_attrs``: dict, optional + Dict that maps node attribute to default values if missing in ``G``. + If None, then no node attributes will be converted. +- ``preserve_edge_attrs`` : bool + Whether to preserve all edge attributes. +- ``preserve_node_attrs`` : bool + Whether to preserve all node attributes. +- ``preserve_graph_attrs`` : bool + Whether to preserve all graph attributes. +- ``preserve_all_attrs`` : bool + Whether to preserve all graph, node, and edge attributes. +- ``name`` : str + The name of the algorithm. 
+- ``graph_name`` : str + The name of the graph argument being converted. + +The converted object is then passed to the backend implementation of +the algorithm. The result is then passed to +``.convert_to_nx(result, name=name)`` to convert back +to a form expected by the NetworkX tests. + +By defining ``convert_from_nx`` and ``convert_to_nx`` methods and setting +the environment variable, NetworkX will automatically route tests on +dispatchable algorithms to the backend, allowing the full networkx test +suite to be run against the backend implementation. + +Example pytest invocation:: + + NETWORKX_TEST_BACKEND=sparse pytest --pyargs networkx + +Dispatchable algorithms which are not implemented by the backend +will cause a ``pytest.xfail()``, giving some indication that not all +tests are working, while avoiding causing an explicit failure. + +If a backend only partially implements some algorithms, it can define +a ``can_run(name, args, kwargs)`` function that returns True or False +indicating whether it can run the algorithm with the given arguments. + +A special ``on_start_tests(items)`` function may be defined by the backend. +It will be called with the list of NetworkX tests discovered. Each item +is a test object that can be marked as xfail if the backend does not support +the test using `item.add_marker(pytest.mark.xfail(reason=...))`. +""" +import inspect +import os +import sys +import warnings +from functools import partial +from importlib.metadata import entry_points + +from ..exception import NetworkXNotImplemented + +__all__ = ["_dispatch"] + + +def _get_backends(group, *, load_and_call=False): + if sys.version_info < (3, 10): + eps = entry_points() + if group not in eps: + return {} + items = eps[group] + else: + items = entry_points(group=group) + rv = {} + for ep in items: + if ep.name in rv: + warnings.warn( + f"networkx backend defined more than once: {ep.name}", + RuntimeWarning, + stacklevel=2, + ) + elif load_and_call: + try: + rv[ep.name] = ep.load()() + except Exception as exc: + warnings.warn( + f"Error encountered when loading info for backend {ep.name}: {exc}", + RuntimeWarning, + stacklevel=2, + ) + else: + rv[ep.name] = ep + # nx-loopback backend is only available when testing (added in conftest.py) + rv.pop("nx-loopback", None) + return rv + + +# Rename "plugin" to "backend", and give backends a release cycle to update. +backends = _get_backends("networkx.plugins") +backend_info = _get_backends("networkx.plugin_info", load_and_call=True) + +backends.update(_get_backends("networkx.backends")) +backend_info.update(_get_backends("networkx.backend_info", load_and_call=True)) + +# Load and cache backends on-demand +_loaded_backends = {} # type: ignore[var-annotated] + + +def _load_backend(backend_name): + if backend_name in _loaded_backends: + return _loaded_backends[backend_name] + rv = _loaded_backends[backend_name] = backends[backend_name].load() + return rv + + +_registered_algorithms = {} + + +class _dispatch: + """Dispatches to a backend algorithm based on input graph types. + + Parameters + ---------- + func : function + + name : str, optional + The name of the algorithm to use for dispatching. If not provided, + the name of ``func`` will be used. ``name`` is useful to avoid name + conflicts, as all dispatched algorithms live in a single namespace. + + graphs : str or dict or None, default "G" + If a string, the parameter name of the graph, which must be the first + argument of the wrapped function. 
If more than one graph is required + for the algorithm (or if the graph is not the first argument), provide + a dict of parameter name to argument position for each graph argument. + For example, ``@_dispatch(graphs={"G": 0, "auxiliary?": 4})`` + indicates the 0th parameter ``G`` of the function is a required graph, + and the 4th parameter ``auxiliary`` is an optional graph. + To indicate an argument is a list of graphs, do e.g. ``"[graphs]"``. + Use ``graphs=None`` if *no* arguments are NetworkX graphs such as for + graph generators, readers, and conversion functions. + + edge_attrs : str or dict, optional + ``edge_attrs`` holds information about edge attribute arguments + and default values for those edge attributes. + If a string, ``edge_attrs`` holds the function argument name that + indicates a single edge attribute to include in the converted graph. + The default value for this attribute is 1. To indicate that an argument + is a list of attributes (all with default value 1), use e.g. ``"[attrs]"``. + If a dict, ``edge_attrs`` holds a dict keyed by argument names, with + values that are either the default value or, if a string, the argument + name that indicates the default value. + + node_attrs : str or dict, optional + Like ``edge_attrs``, but for node attributes. + + preserve_edge_attrs : bool or str or dict, optional + For bool, whether to preserve all edge attributes. + For str, the parameter name that may indicate (with ``True`` or a + callable argument) whether all edge attributes should be preserved + when converting. + For dict of ``{graph_name: {attr: default}}``, indicate pre-determined + edge attributes (and defaults) to preserve for input graphs. + + preserve_node_attrs : bool or str or dict, optional + Like ``preserve_edge_attrs``, but for node attributes. + + preserve_graph_attrs : bool or set + For bool, whether to preserve all graph attributes. + For set, which input graph arguments to preserve graph attributes. + + preserve_all_attrs : bool + Whether to preserve all edge, node and graph attributes. + This overrides all the other preserve_*_attrs. + + """ + + # Allow any of the following decorator forms: + # - @_dispatch + # - @_dispatch() + # - @_dispatch(name="override_name") + # - @_dispatch(graphs="graph") + # - @_dispatch(edge_attrs="weight") + # - @_dispatch(graphs={"G": 0, "H": 1}, edge_attrs={"weight": "default"}) + + # These class attributes are currently used to allow backends to run networkx tests. + # For example: `PYTHONPATH=. 
pytest --backend graphblas --fallback-to-nx` + # Future work: add configuration to control these + _is_testing = False + _fallback_to_nx = ( + os.environ.get("NETWORKX_FALLBACK_TO_NX", "true").strip().lower() == "true" + ) + _automatic_backends = [ + x.strip() + for x in os.environ.get("NETWORKX_AUTOMATIC_BACKENDS", "").split(",") + if x.strip() + ] + + def __new__( + cls, + func=None, + *, + name=None, + graphs="G", + edge_attrs=None, + node_attrs=None, + preserve_edge_attrs=False, + preserve_node_attrs=False, + preserve_graph_attrs=False, + preserve_all_attrs=False, + ): + if func is None: + return partial( + _dispatch, + name=name, + graphs=graphs, + edge_attrs=edge_attrs, + node_attrs=node_attrs, + preserve_edge_attrs=preserve_edge_attrs, + preserve_node_attrs=preserve_node_attrs, + preserve_graph_attrs=preserve_graph_attrs, + preserve_all_attrs=preserve_all_attrs, + ) + if isinstance(func, str): + raise TypeError("'name' and 'graphs' must be passed by keyword") from None + # If name not provided, use the name of the function + if name is None: + name = func.__name__ + + self = object.__new__(cls) + + # standard function-wrapping stuff + # __annotations__ not used + self.__name__ = func.__name__ + # self.__doc__ = func.__doc__ # __doc__ handled as cached property + self.__defaults__ = func.__defaults__ + # We "magically" add `backend=` keyword argument to allow backend to be specified + if func.__kwdefaults__: + self.__kwdefaults__ = {**func.__kwdefaults__, "backend": None} + else: + self.__kwdefaults__ = {"backend": None} + self.__module__ = func.__module__ + self.__qualname__ = func.__qualname__ + self.__dict__.update(func.__dict__) + self.__wrapped__ = func + + # Supplement docstring with backend info; compute and cache when needed + self._orig_doc = func.__doc__ + self._cached_doc = None + + self.orig_func = func + self.name = name + self.edge_attrs = edge_attrs + self.node_attrs = node_attrs + self.preserve_edge_attrs = preserve_edge_attrs or preserve_all_attrs + self.preserve_node_attrs = preserve_node_attrs or preserve_all_attrs + self.preserve_graph_attrs = preserve_graph_attrs or preserve_all_attrs + + if edge_attrs is not None and not isinstance(edge_attrs, (str, dict)): + raise TypeError( + f"Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict." + ) from None + if node_attrs is not None and not isinstance(node_attrs, (str, dict)): + raise TypeError( + f"Bad type for node_attrs: {type(node_attrs)}. Expected str or dict." + ) from None + if not isinstance(self.preserve_edge_attrs, (bool, str, dict)): + raise TypeError( + f"Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}." + " Expected bool, str, or dict." + ) from None + if not isinstance(self.preserve_node_attrs, (bool, str, dict)): + raise TypeError( + f"Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}." + " Expected bool, str, or dict." + ) from None + if not isinstance(self.preserve_graph_attrs, (bool, set)): + raise TypeError( + f"Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}." + " Expected bool or set." + ) from None + + if isinstance(graphs, str): + graphs = {graphs: 0} + elif graphs is None: + pass + elif not isinstance(graphs, dict): + raise TypeError( + f"Bad type for graphs: {type(graphs)}. Expected str or dict." + ) from None + elif len(graphs) == 0: + raise KeyError("'graphs' must contain at least one variable name") from None + + # This dict comprehension is complicated for better performance; equivalent shown below. 
+ self.optional_graphs = set() + self.list_graphs = set() + if graphs is None: + self.graphs = {} + else: + self.graphs = { + self.optional_graphs.add(val := k[:-1]) or val + if (last := k[-1]) == "?" + else self.list_graphs.add(val := k[1:-1]) or val + if last == "]" + else k: v + for k, v in graphs.items() + } + # The above is equivalent to: + # self.optional_graphs = {k[:-1] for k in graphs if k[-1] == "?"} + # self.list_graphs = {k[1:-1] for k in graphs if k[-1] == "]"} + # self.graphs = {k[:-1] if k[-1] == "?" else k: v for k, v in graphs.items()} + + # Compute and cache the signature on-demand + self._sig = None + + # Which backends implement this function? + self.backends = { + backend + for backend, info in backend_info.items() + if "functions" in info and name in info["functions"] + } + + if name in _registered_algorithms: + raise KeyError( + f"Algorithm already exists in dispatch registry: {name}" + ) from None + _registered_algorithms[name] = self + return self + + @property + def __doc__(self): + if (rv := self._cached_doc) is not None: + return rv + rv = self._cached_doc = self._make_doc() + return rv + + @__doc__.setter + def __doc__(self, val): + self._orig_doc = val + self._cached_doc = None + + @property + def __signature__(self): + if self._sig is None: + sig = inspect.signature(self.orig_func) + # `backend` is now a reserved argument used by dispatching. + # assert "backend" not in sig.parameters + if not any( + p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values() + ): + sig = sig.replace( + parameters=[ + *sig.parameters.values(), + inspect.Parameter( + "backend", inspect.Parameter.KEYWORD_ONLY, default=None + ), + inspect.Parameter( + "backend_kwargs", inspect.Parameter.VAR_KEYWORD + ), + ] + ) + else: + *parameters, var_keyword = sig.parameters.values() + sig = sig.replace( + parameters=[ + *parameters, + inspect.Parameter( + "backend", inspect.Parameter.KEYWORD_ONLY, default=None + ), + var_keyword, + ] + ) + self._sig = sig + return self._sig + + def __call__(self, /, *args, backend=None, **kwargs): + if not backends: + # Fast path if no backends are installed + return self.orig_func(*args, **kwargs) + + # Use `backend_name` in this function instead of `backend` + backend_name = backend + if backend_name is not None and backend_name not in backends: + raise ImportError(f"Unable to load backend: {backend_name}") + + graphs_resolved = {} + for gname, pos in self.graphs.items(): + if pos < len(args): + if gname in kwargs: + raise TypeError(f"{self.name}() got multiple values for {gname!r}") + val = args[pos] + elif gname in kwargs: + val = kwargs[gname] + elif gname not in self.optional_graphs: + raise TypeError( + f"{self.name}() missing required graph argument: {gname}" + ) + else: + continue + if val is None: + if gname not in self.optional_graphs: + raise TypeError( + f"{self.name}() required graph argument {gname!r} is None; must be a graph" + ) + else: + graphs_resolved[gname] = val + + # Alternative to the above that does not check duplicated args or missing required graphs. + # graphs_resolved = { + # val + # for gname, pos in self.graphs.items() + # if (val := args[pos] if pos < len(args) else kwargs.get(gname)) is not None + # } + + if self._is_testing and self._automatic_backends and backend_name is None: + # Special path if we are running networkx tests with a backend. 
+ return self._convert_and_call_for_tests( + self._automatic_backends[0], + args, + kwargs, + fallback_to_nx=self._fallback_to_nx, + ) + + # Check if any graph comes from a backend + if self.list_graphs: + # Make sure we don't lose values by consuming an iterator + args = list(args) + for gname in self.list_graphs & graphs_resolved.keys(): + val = list(graphs_resolved[gname]) + graphs_resolved[gname] = val + if gname in kwargs: + kwargs[gname] = val + else: + args[self.graphs[gname]] = val + + has_backends = any( + hasattr(g, "__networkx_backend__") or hasattr(g, "__networkx_plugin__") + if gname not in self.list_graphs + else any( + hasattr(g2, "__networkx_backend__") + or hasattr(g2, "__networkx_plugin__") + for g2 in g + ) + for gname, g in graphs_resolved.items() + ) + if has_backends: + graph_backend_names = { + getattr( + g, + "__networkx_backend__", + getattr(g, "__networkx_plugin__", "networkx"), + ) + for gname, g in graphs_resolved.items() + if gname not in self.list_graphs + } + for gname in self.list_graphs & graphs_resolved.keys(): + graph_backend_names.update( + getattr( + g, + "__networkx_backend__", + getattr(g, "__networkx_plugin__", "networkx"), + ) + for g in graphs_resolved[gname] + ) + else: + has_backends = any( + hasattr(g, "__networkx_backend__") or hasattr(g, "__networkx_plugin__") + for g in graphs_resolved.values() + ) + if has_backends: + graph_backend_names = { + getattr( + g, + "__networkx_backend__", + getattr(g, "__networkx_plugin__", "networkx"), + ) + for g in graphs_resolved.values() + } + if has_backends: + # Dispatchable graphs found! Dispatch to backend function. + # We don't handle calls with different backend graphs yet, + # but we may be able to convert additional networkx graphs. + backend_names = graph_backend_names - {"networkx"} + if len(backend_names) != 1: + # Future work: convert between backends and run if multiple backends found + raise TypeError( + f"{self.name}() graphs must all be from the same backend, found {backend_names}" + ) + [graph_backend_name] = backend_names + if backend_name is not None and backend_name != graph_backend_name: + # Future work: convert between backends to `backend_name` backend + raise TypeError( + f"{self.name}() is unable to convert graph from backend {graph_backend_name!r} " + f"to the specified backend {backend_name!r}." + ) + if graph_backend_name not in backends: + raise ImportError(f"Unable to load backend: {graph_backend_name}") + if ( + "networkx" in graph_backend_names + and graph_backend_name not in self._automatic_backends + ): + # Not configured to convert networkx graphs to this backend + raise TypeError( + f"Unable to convert inputs and run {self.name}. " + f"{self.name}() has networkx and {graph_backend_name} graphs, but NetworkX is not " + f"configured to automatically convert graphs from networkx to {graph_backend_name}." + ) + backend = _load_backend(graph_backend_name) + if hasattr(backend, self.name): + if "networkx" in graph_backend_names: + # We need to convert networkx graphs to backend graphs + return self._convert_and_call( + graph_backend_name, + args, + kwargs, + fallback_to_nx=self._fallback_to_nx, + ) + # All graphs are backend graphs--no need to convert! 
+ return getattr(backend, self.name)(*args, **kwargs) + # Future work: try to convert and run with other backends in self._automatic_backends + raise NetworkXNotImplemented( + f"'{self.name}' not implemented by {graph_backend_name}" + ) + + # If backend was explicitly given by the user, so we need to use it no matter what + if backend_name is not None: + return self._convert_and_call( + backend_name, args, kwargs, fallback_to_nx=False + ) + + # Only networkx graphs; try to convert and run with a backend with automatic + # conversion, but don't do this by default for graph generators or loaders. + if self.graphs: + for backend_name in self._automatic_backends: + if self._can_backend_run(backend_name, *args, **kwargs): + return self._convert_and_call( + backend_name, + args, + kwargs, + fallback_to_nx=self._fallback_to_nx, + ) + # Default: run with networkx on networkx inputs + return self.orig_func(*args, **kwargs) + + def _can_backend_run(self, backend_name, /, *args, **kwargs): + """Can the specified backend run this algorithms with these arguments?""" + backend = _load_backend(backend_name) + return hasattr(backend, self.name) and ( + not hasattr(backend, "can_run") or backend.can_run(self.name, args, kwargs) + ) + + def _convert_arguments(self, backend_name, args, kwargs): + """Convert graph arguments to the specified backend. + + Returns + ------- + args tuple and kwargs dict + """ + bound = self.__signature__.bind(*args, **kwargs) + bound.apply_defaults() + if not self.graphs: + bound_kwargs = bound.kwargs + del bound_kwargs["backend"] + return bound.args, bound_kwargs + # Convert graphs into backend graph-like object + # Include the edge and/or node labels if provided to the algorithm + preserve_edge_attrs = self.preserve_edge_attrs + edge_attrs = self.edge_attrs + if preserve_edge_attrs is False: + # e.g. `preserve_edge_attrs=False` + pass + elif preserve_edge_attrs is True: + # e.g. `preserve_edge_attrs=True` + edge_attrs = None + elif isinstance(preserve_edge_attrs, str): + if bound.arguments[preserve_edge_attrs] is True or callable( + bound.arguments[preserve_edge_attrs] + ): + # e.g. `preserve_edge_attrs="attr"` and `func(attr=True)` + # e.g. `preserve_edge_attrs="attr"` and `func(attr=myfunc)` + preserve_edge_attrs = True + edge_attrs = None + elif bound.arguments[preserve_edge_attrs] is False and ( + isinstance(edge_attrs, str) + and edge_attrs == preserve_edge_attrs + or isinstance(edge_attrs, dict) + and preserve_edge_attrs in edge_attrs + ): + # e.g. `preserve_edge_attrs="attr"` and `func(attr=False)` + # Treat `False` argument as meaning "preserve_edge_data=False" + # and not `False` as the edge attribute to use. + preserve_edge_attrs = False + edge_attrs = None + else: + # e.g. `preserve_edge_attrs="attr"` and `func(attr="weight")` + preserve_edge_attrs = False + # Else: e.g. `preserve_edge_attrs={"G": {"weight": 1}}` + + if edge_attrs is None: + # May have been set to None above b/c all attributes are preserved + pass + elif isinstance(edge_attrs, str): + if edge_attrs[0] == "[": + # e.g. `edge_attrs="[edge_attributes]"` (argument of list of attributes) + # e.g. `func(edge_attributes=["foo", "bar"])` + edge_attrs = { + edge_attr: 1 for edge_attr in bound.arguments[edge_attrs[1:-1]] + } + elif callable(bound.arguments[edge_attrs]): + # e.g. `edge_attrs="weight"` and `func(weight=myfunc)` + preserve_edge_attrs = True + edge_attrs = None + elif bound.arguments[edge_attrs] is not None: + # e.g. 
`edge_attrs="weight"` and `func(weight="foo")` (default of 1) + edge_attrs = {bound.arguments[edge_attrs]: 1} + elif self.name == "to_numpy_array" and hasattr( + bound.arguments["dtype"], "names" + ): + # Custom handling: attributes may be obtained from `dtype` + edge_attrs = { + edge_attr: 1 for edge_attr in bound.arguments["dtype"].names + } + else: + # e.g. `edge_attrs="weight"` and `func(weight=None)` + edge_attrs = None + else: + # e.g. `edge_attrs={"attr": "default"}` and `func(attr="foo", default=7)` + # e.g. `edge_attrs={"attr": 0}` and `func(attr="foo")` + edge_attrs = { + edge_attr: bound.arguments.get(val, 1) if isinstance(val, str) else val + for key, val in edge_attrs.items() + if (edge_attr := bound.arguments[key]) is not None + } + + preserve_node_attrs = self.preserve_node_attrs + node_attrs = self.node_attrs + if preserve_node_attrs is False: + # e.g. `preserve_node_attrs=False` + pass + elif preserve_node_attrs is True: + # e.g. `preserve_node_attrs=True` + node_attrs = None + elif isinstance(preserve_node_attrs, str): + if bound.arguments[preserve_node_attrs] is True or callable( + bound.arguments[preserve_node_attrs] + ): + # e.g. `preserve_node_attrs="attr"` and `func(attr=True)` + # e.g. `preserve_node_attrs="attr"` and `func(attr=myfunc)` + preserve_node_attrs = True + node_attrs = None + elif bound.arguments[preserve_node_attrs] is False and ( + isinstance(node_attrs, str) + and node_attrs == preserve_node_attrs + or isinstance(node_attrs, dict) + and preserve_node_attrs in node_attrs + ): + # e.g. `preserve_node_attrs="attr"` and `func(attr=False)` + # Treat `False` argument as meaning "preserve_node_data=False" + # and not `False` as the node attribute to use. Is this used? + preserve_node_attrs = False + node_attrs = None + else: + # e.g. `preserve_node_attrs="attr"` and `func(attr="weight")` + preserve_node_attrs = False + # Else: e.g. `preserve_node_attrs={"G": {"pos": None}}` + + if node_attrs is None: + # May have been set to None above b/c all attributes are preserved + pass + elif isinstance(node_attrs, str): + if node_attrs[0] == "[": + # e.g. `node_attrs="[node_attributes]"` (argument of list of attributes) + # e.g. `func(node_attributes=["foo", "bar"])` + node_attrs = { + node_attr: None for node_attr in bound.arguments[node_attrs[1:-1]] + } + elif callable(bound.arguments[node_attrs]): + # e.g. `node_attrs="weight"` and `func(weight=myfunc)` + preserve_node_attrs = True + node_attrs = None + elif bound.arguments[node_attrs] is not None: + # e.g. `node_attrs="weight"` and `func(weight="foo")` + node_attrs = {bound.arguments[node_attrs]: None} + else: + # e.g. `node_attrs="weight"` and `func(weight=None)` + node_attrs = None + else: + # e.g. `node_attrs={"attr": "default"}` and `func(attr="foo", default=7)` + # e.g. `node_attrs={"attr": 0}` and `func(attr="foo")` + node_attrs = { + node_attr: bound.arguments.get(val) if isinstance(val, str) else val + for key, val in node_attrs.items() + if (node_attr := bound.arguments[key]) is not None + } + + preserve_graph_attrs = self.preserve_graph_attrs + + # It should be safe to assume that we either have networkx graphs or backend graphs. + # Future work: allow conversions between backends. 
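+        # The backend object is expected to expose (as used below and in
+        # `_convert_and_call` / `_convert_and_call_for_tests`):
+        #   convert_from_nx(G, edge_attrs=..., node_attrs=..., preserve_edge_attrs=...,
+        #                   preserve_node_attrs=..., preserve_graph_attrs=...,
+        #                   name=..., graph_name=...) -> backend-native graph
+        #   convert_to_nx(obj, name=...) -> networkx object (for results)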
+ backend = _load_backend(backend_name) + for gname in self.graphs: + if gname in self.list_graphs: + bound.arguments[gname] = [ + backend.convert_from_nx( + g, + edge_attrs=edge_attrs, + node_attrs=node_attrs, + preserve_edge_attrs=preserve_edge_attrs, + preserve_node_attrs=preserve_node_attrs, + preserve_graph_attrs=preserve_graph_attrs, + name=self.name, + graph_name=gname, + ) + if getattr( + g, + "__networkx_backend__", + getattr(g, "__networkx_plugin__", "networkx"), + ) + == "networkx" + else g + for g in bound.arguments[gname] + ] + else: + graph = bound.arguments[gname] + if graph is None: + if gname in self.optional_graphs: + continue + raise TypeError( + f"Missing required graph argument `{gname}` in {self.name} function" + ) + if isinstance(preserve_edge_attrs, dict): + preserve_edges = False + edges = preserve_edge_attrs.get(gname, edge_attrs) + else: + preserve_edges = preserve_edge_attrs + edges = edge_attrs + if isinstance(preserve_node_attrs, dict): + preserve_nodes = False + nodes = preserve_node_attrs.get(gname, node_attrs) + else: + preserve_nodes = preserve_node_attrs + nodes = node_attrs + if isinstance(preserve_graph_attrs, set): + preserve_graph = gname in preserve_graph_attrs + else: + preserve_graph = preserve_graph_attrs + if ( + getattr( + graph, + "__networkx_backend__", + getattr(graph, "__networkx_plugin__", "networkx"), + ) + == "networkx" + ): + bound.arguments[gname] = backend.convert_from_nx( + graph, + edge_attrs=edges, + node_attrs=nodes, + preserve_edge_attrs=preserve_edges, + preserve_node_attrs=preserve_nodes, + preserve_graph_attrs=preserve_graph, + name=self.name, + graph_name=gname, + ) + bound_kwargs = bound.kwargs + del bound_kwargs["backend"] + return bound.args, bound_kwargs + + def _convert_and_call(self, backend_name, args, kwargs, *, fallback_to_nx=False): + """Call this dispatchable function with a backend, converting graphs if necessary.""" + backend = _load_backend(backend_name) + if not self._can_backend_run(backend_name, *args, **kwargs): + if fallback_to_nx: + return self.orig_func(*args, **kwargs) + msg = f"'{self.name}' not implemented by {backend_name}" + if hasattr(backend, self.name): + msg += " with the given arguments" + raise RuntimeError(msg) + + try: + converted_args, converted_kwargs = self._convert_arguments( + backend_name, args, kwargs + ) + result = getattr(backend, self.name)(*converted_args, **converted_kwargs) + except (NotImplementedError, NetworkXNotImplemented) as exc: + if fallback_to_nx: + return self.orig_func(*args, **kwargs) + raise + + return result + + def _convert_and_call_for_tests( + self, backend_name, args, kwargs, *, fallback_to_nx=False + ): + """Call this dispatchable function with a backend; for use with testing.""" + backend = _load_backend(backend_name) + if not self._can_backend_run(backend_name, *args, **kwargs): + if fallback_to_nx or not self.graphs: + return self.orig_func(*args, **kwargs) + + import pytest + + msg = f"'{self.name}' not implemented by {backend_name}" + if hasattr(backend, self.name): + msg += " with the given arguments" + pytest.xfail(msg) + + try: + converted_args, converted_kwargs = self._convert_arguments( + backend_name, args, kwargs + ) + result = getattr(backend, self.name)(*converted_args, **converted_kwargs) + except (NotImplementedError, NetworkXNotImplemented) as exc: + if fallback_to_nx: + return self.orig_func(*args, **kwargs) + import pytest + + pytest.xfail( + exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" + ) + + if self.name in { + 
"edmonds_karp_core", + "barycenter", + "contracted_nodes", + "stochastic_graph", + "relabel_nodes", + }: + # Special-case algorithms that mutate input graphs + bound = self.__signature__.bind(*converted_args, **converted_kwargs) + bound.apply_defaults() + bound2 = self.__signature__.bind(*args, **kwargs) + bound2.apply_defaults() + if self.name == "edmonds_karp_core": + R1 = backend.convert_to_nx(bound.arguments["R"]) + R2 = bound2.arguments["R"] + for k, v in R1.edges.items(): + R2.edges[k]["flow"] = v["flow"] + elif self.name == "barycenter" and bound.arguments["attr"] is not None: + G1 = backend.convert_to_nx(bound.arguments["G"]) + G2 = bound2.arguments["G"] + attr = bound.arguments["attr"] + for k, v in G1.nodes.items(): + G2.nodes[k][attr] = v[attr] + elif self.name == "contracted_nodes" and not bound.arguments["copy"]: + # Edges and nodes changed; node "contraction" and edge "weight" attrs + G1 = backend.convert_to_nx(bound.arguments["G"]) + G2 = bound2.arguments["G"] + G2.__dict__.update(G1.__dict__) + elif self.name == "stochastic_graph" and not bound.arguments["copy"]: + G1 = backend.convert_to_nx(bound.arguments["G"]) + G2 = bound2.arguments["G"] + for k, v in G1.edges.items(): + G2.edges[k]["weight"] = v["weight"] + elif self.name == "relabel_nodes" and not bound.arguments["copy"]: + G1 = backend.convert_to_nx(bound.arguments["G"]) + G2 = bound2.arguments["G"] + if G1 is G2: + return G2 + G2._node.clear() + G2._node.update(G1._node) + G2._adj.clear() + G2._adj.update(G1._adj) + if hasattr(G1, "_pred") and hasattr(G2, "_pred"): + G2._pred.clear() + G2._pred.update(G1._pred) + if hasattr(G1, "_succ") and hasattr(G2, "_succ"): + G2._succ.clear() + G2._succ.update(G1._succ) + return G2 + + return backend.convert_to_nx(result, name=self.name) + + def _make_doc(self): + if not self.backends: + return self._orig_doc + lines = [ + "Backends", + "--------", + ] + for backend in sorted(self.backends): + info = backend_info[backend] + if "short_summary" in info: + lines.append(f"{backend} : {info['short_summary']}") + else: + lines.append(backend) + if "functions" not in info or self.name not in info["functions"]: + lines.append("") + continue + + func_info = info["functions"][self.name] + if "extra_docstring" in func_info: + lines.extend( + f" {line}" if line else line + for line in func_info["extra_docstring"].split("\n") + ) + add_gap = True + else: + add_gap = False + if "extra_parameters" in func_info: + if add_gap: + lines.append("") + lines.append(" Extra parameters:") + extra_parameters = func_info["extra_parameters"] + for param in sorted(extra_parameters): + lines.append(f" {param}") + if desc := extra_parameters[param]: + lines.append(f" {desc}") + lines.append("") + else: + lines.append("") + + lines.pop() # Remove last empty line + to_add = "\n ".join(lines) + return f"{self._orig_doc.rstrip()}\n\n {to_add}" + + def __reduce__(self): + """Allow this object to be serialized with pickle. + + This uses the global registry `_registered_algorithms` to deserialize. + """ + return _restore_dispatch, (self.name,) + + +def _restore_dispatch(name): + return _registered_algorithms[name] + + +if os.environ.get("_NETWORKX_BUILDING_DOCS_"): + # When building docs with Sphinx, use the original function with the + # dispatched __doc__, b/c Sphinx renders normal Python functions better. + # This doesn't show e.g. `*, backend=None, **backend_kwargs` in the + # signatures, which is probably okay. It does allow the docstring to be + # updated based on the installed backends. 
+ _orig_dispatch = _dispatch + + def _dispatch(func=None, **kwargs): # type: ignore[no-redef] + if func is None: + return partial(_dispatch, **kwargs) + dispatched_func = _orig_dispatch(func, **kwargs) + func.__doc__ = dispatched_func.__doc__ + return func diff --git a/phivenv/Lib/site-packages/networkx/utils/decorators.py b/phivenv/Lib/site-packages/networkx/utils/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..cc15882da72891a855b727c117b9125d196588c2 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/decorators.py @@ -0,0 +1,1270 @@ +import bz2 +import collections +import gzip +import inspect +import itertools +import re +import warnings +from collections import defaultdict +from contextlib import contextmanager +from functools import wraps +from inspect import Parameter, signature +from os.path import splitext +from pathlib import Path + +import networkx as nx +from networkx.utils import create_py_random_state, create_random_state + +__all__ = [ + "not_implemented_for", + "open_file", + "nodes_or_number", + "np_random_state", + "py_random_state", + "argmap", + "deprecate_positional_args", +] + + +def not_implemented_for(*graph_types): + """Decorator to mark algorithms as not implemented + + Parameters + ---------- + graph_types : container of strings + Entries must be one of "directed", "undirected", "multigraph", or "graph". + + Returns + ------- + _require : function + The decorated function. + + Raises + ------ + NetworkXNotImplemented + If any of the packages cannot be imported + + Notes + ----- + Multiple types are joined logically with "and". + For "or" use multiple @not_implemented_for() lines. + + Examples + -------- + Decorate functions like this:: + + @not_implemented_for("directed") + def sp_function(G): + pass + + # rule out MultiDiGraph + @not_implemented_for("directed","multigraph") + def sp_np_function(G): + pass + + # rule out all except DiGraph + @not_implemented_for("undirected") + @not_implemented_for("multigraph") + def sp_np_function(G): + pass + """ + if ("directed" in graph_types) and ("undirected" in graph_types): + raise ValueError("Function not implemented on directed AND undirected graphs?") + if ("multigraph" in graph_types) and ("graph" in graph_types): + raise ValueError("Function not implemented on graph AND multigraphs?") + if not set(graph_types) < {"directed", "undirected", "multigraph", "graph"}: + raise KeyError( + "use one or more of directed, undirected, multigraph, graph. " + f"You used {graph_types}" + ) + + # 3-way logic: True if "directed" input, False if "undirected" input, else None + dval = ("directed" in graph_types) or "undirected" not in graph_types and None + mval = ("multigraph" in graph_types) or "graph" not in graph_types and None + errmsg = f"not implemented for {' '.join(graph_types)} type" + + def _not_implemented_for(g): + if (mval is None or mval == g.is_multigraph()) and ( + dval is None or dval == g.is_directed() + ): + raise nx.NetworkXNotImplemented(errmsg) + + return g + + return argmap(_not_implemented_for, 0) + + +# To handle new extensions, define a function accepting a `path` and `mode`. +# Then add the extension to _dispatch_dict. +fopeners = { + ".gz": gzip.open, + ".gzip": gzip.open, + ".bz2": bz2.BZ2File, +} +_dispatch_dict = defaultdict(lambda: open, **fopeners) + + +def open_file(path_arg, mode="r"): + """Decorator to ensure clean opening and closing of files. + + Parameters + ---------- + path_arg : string or int + Name or index of the argument that is a path. 
+ + mode : str + String for opening mode. + + Returns + ------- + _open_file : function + Function which cleanly executes the io. + + Examples + -------- + Decorate functions like this:: + + @open_file(0,"r") + def read_function(pathname): + pass + + @open_file(1,"w") + def write_function(G, pathname): + pass + + @open_file(1,"w") + def write_function(G, pathname="graph.dot"): + pass + + @open_file("pathname","w") + def write_function(G, pathname="graph.dot"): + pass + + @open_file("path", "w+") + def another_function(arg, **kwargs): + path = kwargs["path"] + pass + + Notes + ----- + Note that this decorator solves the problem when a path argument is + specified as a string, but it does not handle the situation when the + function wants to accept a default of None (and then handle it). + + Here is an example of how to handle this case:: + + @open_file("path") + def some_function(arg1, arg2, path=None): + if path is None: + fobj = tempfile.NamedTemporaryFile(delete=False) + else: + # `path` could have been a string or file object or something + # similar. In any event, the decorator has given us a file object + # and it will close it for us, if it should. + fobj = path + + try: + fobj.write("blah") + finally: + if path is None: + fobj.close() + + Normally, we'd want to use "with" to ensure that fobj gets closed. + However, the decorator will make `path` a file object for us, + and using "with" would undesirably close that file object. + Instead, we use a try block, as shown above. + When we exit the function, fobj will be closed, if it should be, by the decorator. + """ + + def _open_file(path): + # Now we have the path_arg. There are two types of input to consider: + # 1) string representing a path that should be opened + # 2) an already opened file object + if isinstance(path, str): + ext = splitext(path)[1] + elif isinstance(path, Path): + # path is a pathlib reference to a filename + ext = path.suffix + path = str(path) + else: + # could be None, or a file handle, in which case the algorithm will deal with it + return path, lambda: None + + fobj = _dispatch_dict[ext](path, mode=mode) + return fobj, lambda: fobj.close() + + return argmap(_open_file, path_arg, try_finally=True) + + +def nodes_or_number(which_args): + """Decorator to allow number of nodes or container of nodes. + + With this decorator, the specified argument can be either a number or a container + of nodes. If it is a number, the nodes used are `range(n)`. + This allows `nx.complete_graph(50)` in place of `nx.complete_graph(list(range(50)))`. + And it also allows `nx.complete_graph(any_list_of_nodes)`. + + Parameters + ---------- + which_args : string or int or sequence of strings or ints + If string, the name of the argument to be treated. + If int, the index of the argument to be treated. + If more than one node argument is allowed, can be a list of locations. + + Returns + ------- + _nodes_or_numbers : function + Function which replaces int args with ranges. 
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @nodes_or_number("nodes")
+       def empty_graph(nodes):
+           # nodes is converted to a list of nodes
+
+       @nodes_or_number(0)
+       def empty_graph(nodes):
+           # nodes is converted to a list of nodes
+
+       @nodes_or_number(["m1", "m2"])
+       def grid_2d_graph(m1, m2, periodic=False):
+           # m1 and m2 are each converted to a list of nodes
+
+       @nodes_or_number([0, 1])
+       def grid_2d_graph(m1, m2, periodic=False):
+           # m1 and m2 are each converted to a list of nodes
+
+       @nodes_or_number(1)
+       def full_rary_tree(r, n):
+           # presumably r is a number. It is not handled by this decorator.
+           # n is converted to a list of nodes
+    """
+
+    def _nodes_or_number(n):
+        try:
+            nodes = list(range(n))
+        except TypeError:
+            nodes = tuple(n)
+        else:
+            if n < 0:
+                raise nx.NetworkXError(f"Negative number of nodes not valid: {n}")
+        return (n, nodes)
+
+    try:
+        iter_wa = iter(which_args)
+    except TypeError:
+        iter_wa = (which_args,)
+
+    return argmap(_nodes_or_number, *iter_wa)
+
+
+def np_random_state(random_state_argument):
+    """Decorator to generate a `numpy.random.RandomState` instance.
+
+    The decorator processes the argument indicated by `random_state_argument`
+    using :func:`nx.utils.create_random_state`.
+    The argument value can be a seed (integer), a `numpy.random.RandomState`
+    instance, `None`, or the `numpy.random` module. The latter two options use
+    the global random number generator of `numpy.random`.
+    The result is a `numpy.random.RandomState` instance.
+
+    Parameters
+    ----------
+    random_state_argument : string or int
+        The name or index of the argument to be converted
+        to a `numpy.random.RandomState` instance.
+
+    Returns
+    -------
+    _random_state : function
+        Function whose random_state keyword argument is a RandomState instance.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @np_random_state("seed")
+       def random_float(seed=None):
+           return seed.rand()
+
+       @np_random_state(0)
+       def random_float(rng=None):
+           return rng.rand()
+
+       @np_random_state(1)
+       def random_array(dims, random_state=1):
+           return random_state.rand(*dims)
+
+    See Also
+    --------
+    py_random_state
+    """
+    return argmap(create_random_state, random_state_argument)
+
+
+def py_random_state(random_state_argument):
+    """Decorator to generate a random.Random instance (or equiv).
+
+    The decorator processes the argument indicated by `random_state_argument`
+    using :func:`nx.utils.create_py_random_state`.
+    The argument value can be a seed (integer), or a random number generator::
+
+        If int, return a random.Random instance set with seed=int.
+        If random.Random instance, return it.
+        If None or the `random` package, return the global random number
+          generator used by `random`.
+        If np.random package, return the global numpy random number
+          generator wrapped in a PythonRandomInterface class.
+        If np.random.RandomState instance, return it wrapped in
+          PythonRandomInterface
+        If a PythonRandomInterface instance, return it
+
+    Parameters
+    ----------
+    random_state_argument : string or int
+        The name of the argument or the index of the argument in args that is
+        to be converted to the random.Random instance or numpy.random.RandomState
+        instance that mimics basic methods of random.Random.
+
+    Returns
+    -------
+    _random_state : function
+        Function whose random_state_argument is converted to a Random instance.
+
+    Examples
+    --------
+    Decorate functions like this::
+
+       @py_random_state("random_state")
+       def random_float(random_state=None):
+           return random_state.rand()
+
+       @py_random_state(0)
+       def random_float(rng=None):
+           return rng.rand()
+
+       @py_random_state(1)
+       def random_array(dims, seed=12345):
+           return seed.rand(*dims)
+
+    See Also
+    --------
+    np_random_state
+    """
+
+    return argmap(create_py_random_state, random_state_argument)
+
+
+class argmap:
+    """A decorator to apply a map to arguments before calling the function
+
+    This class provides a decorator that maps (transforms) arguments of the function
+    before the function is called. Thus for example, we have similar code
+    in many functions to determine whether an argument is the number of nodes
+    to be created, or a list of nodes to be handled. The decorator provides
+    the code to accept either -- transforming the indicated argument into a
+    list of nodes before the actual function is called.
+
+    This decorator class allows us to process single or multiple arguments.
+    The arguments to be processed can be specified by string, naming the argument,
+    or by index, specifying the item in the args list.
+
+    Parameters
+    ----------
+    func : callable
+        The function to apply to arguments
+
+    *args : iterable of (int, str or tuple)
+        A list of parameters, specified either as strings (their names), ints
+        (numerical indices) or tuples, which may contain ints, strings, and
+        (recursively) tuples. Each indicates which parameters the decorator
+        should map. Tuples indicate that the map function takes (and returns)
+        multiple parameters in the same order and nested structure as indicated
+        here.
+
+    try_finally : bool (default: False)
+        When True, wrap the function call in a try-finally block with code
+        for the finally block created by `func`. This is used when the map
+        function constructs an object (like a file handle) that requires
+        post-processing (like closing).
+
+        Note: try_finally decorators cannot be used to decorate generator
+        functions.
+
+    Examples
+    --------
+    Most of these examples use `@argmap(...)` to apply the decorator to
+    the function defined on the next line.
+    In the NetworkX codebase however, `argmap` is used within a function to
+    construct a decorator. That is, the decorator defines a mapping function
+    and then uses `argmap` to build and return a decorated function.
+    A simple example is a decorator that specifies which currency to use when
+    reporting money. The decorator (named `convert_to`) would be used like::
+
+        @convert_to("US_Dollars", "income")
+        def show_me_the_money(name, income):
+            print(f"{name} : {income}")
+
+    And the code to create the decorator might be::
+
+        def convert_to(currency, which_arg):
+            def _convert(amount):
+                if amount.currency != currency:
+                    amount = amount.to_currency(currency)
+                return amount
+            return argmap(_convert, which_arg)
+
+    Despite this common idiom for argmap, most of the following examples
+    use the `@argmap(...)` idiom to save space.
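+
+    As a small, self-contained sketch of the same decorator-building idiom
+    (`force_int` and `half` are illustrative names, not a NetworkX API)::
+
+        from networkx.utils import argmap
+
+        def force_int(which_arg):
+            return argmap(int, which_arg)
+
+        @force_int(0)
+        def half(x):
+            return x / 2
+
+        half("6")  # the argument is mapped through int() first -> 3.0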
+
+    Here's an example use of argmap to sum the elements of two of the function's
+    arguments. The decorated function::
+
+        @argmap(sum, "xlist", "zlist")
+        def foo(xlist, y, zlist):
+            return xlist - y + zlist
+
+    is syntactic sugar for::
+
+        def foo(xlist, y, zlist):
+            x = sum(xlist)
+            z = sum(zlist)
+            return x - y + z
+
+    and is equivalent to (using argument indexes)::
+
+        @argmap(sum, "xlist", 2)
+        def foo(xlist, y, zlist):
+            return xlist - y + zlist
+
+    or::
+
+        @argmap(sum, "zlist", 0)
+        def foo(xlist, y, zlist):
+            return xlist - y + zlist
+
+    Transforming functions can be applied to multiple arguments, such as::
+
+        def swap(x, y):
+            return y, x
+
+        # the 2-tuple tells argmap that the map `swap` has 2 inputs/outputs.
+        @argmap(swap, ("a", "b"))
+        def foo(a, b, c):
+            return a / b * c
+
+    is equivalent to::
+
+        def foo(a, b, c):
+            a, b = swap(a, b)
+            return a / b * c
+
+    More generally, the applied arguments can be nested tuples of strings or ints.
+    The syntax `@argmap(some_func, ("a", ("b", "c")))` would expect `some_func` to
+    accept 2 inputs with the second expected to be a 2-tuple. It should then return
+    2 outputs with the second a 2-tuple. The returned values would replace inputs
+    "a", "b" and "c" respectively. Similarly for `@argmap(some_func, (0, ("b", 2)))`.
+
+    Also, note that an index larger than the number of named parameters is allowed
+    for variadic functions. For example::
+
+        def double(a):
+            return 2 * a
+
+        @argmap(double, 3)
+        def overflow(a, *args):
+            return a, args
+
+        print(overflow(1, 2, 3, 4, 5, 6))  # output is 1, (2, 3, 8, 5, 6)
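+
+    The nested-tuple form described above can be exercised the same way; a
+    minimal runnable sketch (all names here are illustrative)::
+
+        def mean3(a, bc):
+            b, c = bc
+            s = (a + b + c) / 3
+            return s, (s, s)
+
+        @argmap(mean3, ("a", ("b", "c")))
+        def spread(a, b, c):
+            return a, b, c  # each argument has been replaced by the mean
+
+        spread(0, 3, 6)  # -> (3.0, 3.0, 3.0)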
+
+    **Try Finally**
+
+    Additionally, this `argmap` class can be used to create a decorator that
+    initiates a try...finally block. The decorator must be written to return
+    both the transformed argument and a closing function.
+    This feature was included to enable the `open_file` decorator which might
+    need to close the file or not depending on whether it had to open that file.
+    This feature uses the keyword-only `try_finally` argument to `@argmap`.
+
+    For example, this map opens a file and then makes sure it is closed::
+
+        def open_file(fn):
+            f = open(fn)
+            return f, lambda: f.close()
+
+    The decorator applies that to the function `foo`::
+
+        @argmap(open_file, "file", try_finally=True)
+        def foo(file):
+            print(file.read())
+
+    is syntactic sugar for::
+
+        def foo(file):
+            file, close_file = open_file(file)
+            try:
+                print(file.read())
+            finally:
+                close_file()
+
+    and is equivalent to (using indexes)::
+
+        @argmap(open_file, 0, try_finally=True)
+        def foo(file):
+            print(file.read())
+
+    Here's an example of the try_finally feature used to create a decorator::
+
+        def my_closing_decorator(which_arg):
+            def _opener(path):
+                if isinstance(path, str):
+                    # we opened the file, so we are responsible for closing it
+                    path = open(path)
+                    fclose = path.close
+                else:
+                    # assume `path` is an open file object that handles its own closing
+                    fclose = lambda: None
+                return path, fclose
+            return argmap(_opener, which_arg, try_finally=True)
+
+    which can then be used as::
+
+        @my_closing_decorator("file")
+        def fancy_reader(file=None):
+            # this code doesn't need to worry about closing the file
+            print(file.read())
+
+    Decorators with try_finally = True cannot be used with generator functions,
+    because the `finally` block is evaluated before the generator is exhausted::
+
+        @argmap(open_file, "file", try_finally=True)
+        def file_to_lines(file):
+            for line in file.readlines():
+                yield line
+
+    is equivalent to::
+
+        def file_to_lines_wrapped(file):
+            for line in file.readlines():
+                yield line
+
+        def file_to_lines_wrapper(file):
+            try:
+                file, close_file = open_file(file)
+                return file_to_lines_wrapped(file)
+            finally:
+                close_file()
+
+    which behaves similarly to::
+
+        def file_to_lines_whoops(file):
+            file, close_file = open_file(file)
+            close_file()
+            for line in file.readlines():
+                yield line
+
+    because the `finally` block of `file_to_lines_wrapper` is executed before
+    the caller has a chance to exhaust the iterator.
+
+    Notes
+    -----
+    An object of this class is callable and intended to be used when
+    defining a decorator. Generally, a decorator takes a function as input
+    and constructs a function as output. Specifically, an `argmap` object
+    returns the input function decorated/wrapped so that specified arguments
+    are mapped (transformed) to new values before the decorated function is called.
+
+    As an overview, the argmap object returns a new function with all the
+    dunder values of the original function (like `__doc__`, `__name__`, etc).
+    Code for this decorated function is built based on the original function's
+    signature. It starts by mapping the input arguments to potentially new
+    values. Then it calls the decorated function with these new values in place
+    of the indicated arguments that have been mapped. The return value of the
+    original function is then returned. This new function is the function that
+    is actually called by the user.
+
+    Three additional features are provided.
+        1) The code is lazily compiled. That is, the new function is returned
+        as an object without the code compiled, but with all information
+        needed so it can be compiled upon its first invocation. This saves
+        time on import at the cost of additional time on the first call of
+        the function. Subsequent calls are then just as fast as normal.
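+
+        A sketch of what this laziness looks like in practice (hypothetical
+        example; `_code` is the attribute set by `argmap.compile` below)::
+
+            @argmap(str.strip, 0)
+            def shout(word):
+                return word.upper() + "!"
+
+            shout("  hi  ")     # first call compiles the wrapper -> 'HI!'
+            print(shout._code)  # the generated source is kept on the function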
+
+        2) If the "try_finally" keyword-only argument is True, a try block
+        follows each mapped argument and is matched, on the other side of the
+        wrapped call, by a finally block that closes that mapping. We expect
+        func to return a 2-tuple: the mapped value and a function to be called
+        in the finally clause. This feature was included so the `open_file`
+        decorator could provide a file handle to the decorated function and
+        close the file handle after the function call. It even keeps track of
+        whether to close the file handle or not based on whether it had to open
+        the file or the input was already open. So, the decorated function does
+        not need to include any code to open or close files.
+
+        3) The maps applied can process multiple arguments. For example,
+        you could swap two arguments using a mapping, or transform
+        them to their sum and their difference. This was included to allow
+        a decorator in the `quality.py` module that checks that an input
+        `partition` is a valid partition of the nodes of the input graph `G`.
+        In this example, the map has inputs `(G, partition)`. After checking
+        for a valid partition, the map either raises an exception or leaves
+        the inputs unchanged. Thus many functions that make this check can
+        use the decorator rather than copy the checking code into each function.
+        More complicated nested argument structures are described below.
+
+    The remaining notes describe the code structure and methods for this
+    class in broad terms to aid in understanding how to use it.
+
+    Instantiating an `argmap` object simply stores the mapping function and
+    the input identifiers of which arguments to map. The resulting decorator
+    is ready to use this map to decorate any function. When that object is
+    called (`argmap.__call__`, usually via the `@my_decorator` syntax), a
+    lazily compiled thin wrapper of the decorated function is constructed,
+    wrapped with the necessary function dunder attributes like `__doc__`
+    and `__name__`. That thinly wrapped function is returned as the
+    decorated function. When that decorated function is called, the thin
+    wrapper of code calls `argmap._lazy_compile` which compiles the decorated
+    function (using `argmap.compile`) and replaces the code of the thin
+    wrapper with the newly compiled code. This saves the compilation step
+    on every import of networkx, at the cost of compiling upon the first call
+    to the decorated function.
+
+    When the decorated function is compiled, the code is recursively assembled
+    using the `argmap.assemble` method. The recursive nature is needed in
+    case of nested decorators. The result of the assembly is a number of
+    useful objects.
+
+      sig : the function signature of the original decorated function as
+          constructed by :func:`argmap.signature`. This is constructed
+          using `inspect.signature` but enhanced with attribute
+          strings `def_sig` and `call_sig`, and other information
+          specific to mapping arguments of this function.
+          This information is used to construct a string of code defining
+          the new decorated function.
+
+      wrapped_name : a unique internally used name constructed by argmap
+          for the decorated function.
+
+      functions : a dict of the functions used inside the code of this
+          decorated function, to be used as `globals` in `exec`.
+          This dict is recursively updated to allow for nested decorating.
+
+      mapblock : code (as a list of strings) to map the incoming argument
+          values to their mapped values.
+
+      finallys : code (as a list of strings) to provide the possibly nested
+          set of finally clauses if needed.
+
+      mutable_args : a bool indicating whether the `sig.args` tuple should be
+          converted to a list so mutation can occur.
+
+    After this recursive assembly process, the `argmap.compile` method
+    constructs code (as strings) to convert the tuple `sig.args` to a list
+    if needed.
It joins the defining code with appropriate indents and + compiles the result. Finally, this code is evaluated and the original + wrapper's implementation is replaced with the compiled version (see + `argmap._lazy_compile` for more details). + + Other `argmap` methods include `_name` and `_count` which allow internally + generated names to be unique within a python session. + The methods `_flatten` and `_indent` process the nested lists of strings + into properly indented python code ready to be compiled. + + More complicated nested tuples of arguments also allowed though + usually not used. For the simple 2 argument case, the argmap + input ("a", "b") implies the mapping function will take 2 arguments + and return a 2-tuple of mapped values. A more complicated example + with argmap input `("a", ("b", "c"))` requires the mapping function + take 2 inputs, with the second being a 2-tuple. It then must output + the 3 mapped values in the same nested structure `(newa, (newb, newc))`. + This level of generality is not often needed, but was convenient + to implement when handling the multiple arguments. + + See Also + -------- + not_implemented_for + open_file + nodes_or_number + random_state + py_random_state + networkx.community.quality.require_partition + require_partition + + """ + + def __init__(self, func, *args, try_finally=False): + self._func = func + self._args = args + self._finally = try_finally + + @staticmethod + def _lazy_compile(func): + """Compile the source of a wrapped function + + Assemble and compile the decorated function, and intrusively replace its + code with the compiled version's. The thinly wrapped function becomes + the decorated function. + + Parameters + ---------- + func : callable + A function returned by argmap.__call__ which is in the process + of being called for the first time. + + Returns + ------- + func : callable + The same function, with a new __code__ object. + + Notes + ----- + It was observed in NetworkX issue #4732 [1] that the import time of + NetworkX was significantly bloated by the use of decorators: over half + of the import time was being spent decorating functions. This was + somewhat improved by a change made to the `decorator` library, at the + cost of a relatively heavy-weight call to `inspect.Signature.bind` + for each call to the decorated function. + + The workaround we arrived at is to do minimal work at the time of + decoration. When the decorated function is called for the first time, + we compile a function with the same function signature as the wrapped + function. The resulting decorated function is faster than one made by + the `decorator` library, so that the overhead of the first call is + 'paid off' after a small number of calls. + + References + ---------- + + [1] https://github.com/networkx/networkx/issues/4732 + + """ + real_func = func.__argmap__.compile(func.__wrapped__) + func.__code__ = real_func.__code__ + func.__globals__.update(real_func.__globals__) + func.__dict__.update(real_func.__dict__) + return func + + def __call__(self, f): + """Construct a lazily decorated wrapper of f. + + The decorated function will be compiled when it is called for the first time, + and it will replace its own __code__ object so subsequent calls are fast. + + Parameters + ---------- + f : callable + A function to be decorated. + + Returns + ------- + func : callable + The decorated function. 
+ + See Also + -------- + argmap._lazy_compile + """ + + def func(*args, __wrapper=None, **kwargs): + return argmap._lazy_compile(__wrapper)(*args, **kwargs) + + # standard function-wrapping stuff + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + func.__defaults__ = f.__defaults__ + func.__kwdefaults__.update(f.__kwdefaults__ or {}) + func.__module__ = f.__module__ + func.__qualname__ = f.__qualname__ + func.__dict__.update(f.__dict__) + func.__wrapped__ = f + + # now that we've wrapped f, we may have picked up some __dict__ or + # __kwdefaults__ items that were set by a previous argmap. Thus, we set + # these values after those update() calls. + + # If we attempt to access func from within itself, that happens through + # a closure -- which trips an error when we replace func.__code__. The + # standard workaround for functions which can't see themselves is to use + # a Y-combinator, as we do here. + func.__kwdefaults__["_argmap__wrapper"] = func + + # this self-reference is here because functools.wraps preserves + # everything in __dict__, and we don't want to mistake a non-argmap + # wrapper for an argmap wrapper + func.__self__ = func + + # this is used to variously call self.assemble and self.compile + func.__argmap__ = self + + if hasattr(f, "__argmap__"): + func.__is_generator = f.__is_generator + else: + func.__is_generator = inspect.isgeneratorfunction(f) + + if self._finally and func.__is_generator: + raise nx.NetworkXError("argmap cannot decorate generators with try_finally") + + return func + + __count = 0 + + @classmethod + def _count(cls): + """Maintain a globally-unique identifier for function names and "file" names + + Note that this counter is a class method reporting a class variable + so the count is unique within a Python session. It could differ from + session to session for a specific decorator depending on the order + that the decorators are created. But that doesn't disrupt `argmap`. + + This is used in two places: to construct unique variable names + in the `_name` method and to construct unique fictitious filenames + in the `_compile` method. + + Returns + ------- + count : int + An integer unique to this Python session (simply counts from zero) + """ + cls.__count += 1 + return cls.__count + + _bad_chars = re.compile("[^a-zA-Z0-9_]") + + @classmethod + def _name(cls, f): + """Mangle the name of a function to be unique but somewhat human-readable + + The names are unique within a Python session and set using `_count`. + + Parameters + ---------- + f : str or object + + Returns + ------- + name : str + The mangled version of `f.__name__` (if `f.__name__` exists) or `f` + + """ + f = f.__name__ if hasattr(f, "__name__") else f + fname = re.sub(cls._bad_chars, "_", f) + return f"argmap_{fname}_{cls._count()}" + + def compile(self, f): + """Compile the decorated function. + + Called once for a given decorated function -- collects the code from all + argmap decorators in the stack, and compiles the decorated function. + + Much of the work done here uses the `assemble` method to allow recursive + treatment of multiple argmap decorators on a single decorated function. + That flattens the argmap decorators, collects the source code to construct + a single decorated function, then compiles/executes/returns that function. + + The source code for the decorated function is stored as an attribute + `_code` on the function object itself. 
+ + Note that Python's `compile` function requires a filename, but this + code is constructed without a file, so a fictitious filename is used + to describe where the function comes from. The name is something like: + "argmap compilation 4". + + Parameters + ---------- + f : callable + The function to be decorated + + Returns + ------- + func : callable + The decorated file + + """ + sig, wrapped_name, functions, mapblock, finallys, mutable_args = self.assemble( + f + ) + + call = f"{sig.call_sig.format(wrapped_name)}#" + mut_args = f"{sig.args} = list({sig.args})" if mutable_args else "" + body = argmap._indent(sig.def_sig, mut_args, mapblock, call, finallys) + code = "\n".join(body) + + locl = {} + globl = dict(functions.values()) + filename = f"{self.__class__} compilation {self._count()}" + compiled = compile(code, filename, "exec") + exec(compiled, globl, locl) + func = locl[sig.name] + func._code = code + return func + + def assemble(self, f): + """Collects components of the source for the decorated function wrapping f. + + If `f` has multiple argmap decorators, we recursively assemble the stack of + decorators into a single flattened function. + + This method is part of the `compile` method's process yet separated + from that method to allow recursive processing. The outputs are + strings, dictionaries and lists that collect needed info to + flatten any nested argmap-decoration. + + Parameters + ---------- + f : callable + The function to be decorated. If f is argmapped, we assemble it. + + Returns + ------- + sig : argmap.Signature + The function signature as an `argmap.Signature` object. + wrapped_name : str + The mangled name used to represent the wrapped function in the code + being assembled. + functions : dict + A dictionary mapping id(g) -> (mangled_name(g), g) for functions g + referred to in the code being assembled. These need to be present + in the ``globals`` scope of ``exec`` when defining the decorated + function. + mapblock : list of lists and/or strings + Code that implements mapping of parameters including any try blocks + if needed. This code will precede the decorated function call. + finallys : list of lists and/or strings + Code that implements the finally blocks to post-process the + arguments (usually close any files if needed) after the + decorated function is called. + mutable_args : bool + True if the decorator needs to modify positional arguments + via their indices. The compile method then turns the argument + tuple into a list so that the arguments can be modified. + """ + + # first, we check if f is already argmapped -- if that's the case, + # build up the function recursively. + # > mapblock is generally a list of function calls of the sort + # arg = func(arg) + # in addition to some try-blocks if needed. + # > finallys is a recursive list of finally blocks of the sort + # finally: + # close_func_1() + # finally: + # close_func_2() + # > functions is a dict of functions used in the scope of our decorated + # function. It will be used to construct globals used in compilation. + # We make functions[id(f)] = name_of_f, f to ensure that a given + # function is stored and named exactly once even if called by + # nested decorators. 
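+        # For example (a sketch of the flattening), stacking two decorators::
+        #     @argmap(f1, "a")
+        #     @argmap(f2, "b")
+        #     def foo(a, b): ...
+        # assembles into a single wrapper whose mapblock applies both
+        # `a = f1(a)` and `b = f2(b)` before one call to the original `foo`.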
+ if hasattr(f, "__argmap__") and f.__self__ is f: + ( + sig, + wrapped_name, + functions, + mapblock, + finallys, + mutable_args, + ) = f.__argmap__.assemble(f.__wrapped__) + functions = dict(functions) # shallow-copy just in case + else: + sig = self.signature(f) + wrapped_name = self._name(f) + mapblock, finallys = [], [] + functions = {id(f): (wrapped_name, f)} + mutable_args = False + + if id(self._func) in functions: + fname, _ = functions[id(self._func)] + else: + fname, _ = functions[id(self._func)] = self._name(self._func), self._func + + # this is a bit complicated -- we can call functions with a variety of + # nested arguments, so long as their input and output are tuples with + # the same nested structure. e.g. ("a", "b") maps arguments a and b. + # A more complicated nesting like (0, (3, 4)) maps arguments 0, 3, 4 + # expecting the mapping to output new values in the same nested shape. + # The ability to argmap multiple arguments was necessary for + # the decorator `nx.algorithms.community.quality.require_partition`, and + # while we're not taking full advantage of the ability to handle + # multiply-nested tuples, it was convenient to implement this in + # generality because the recursive call to `get_name` is necessary in + # any case. + applied = set() + + def get_name(arg, first=True): + nonlocal mutable_args + if isinstance(arg, tuple): + name = ", ".join(get_name(x, False) for x in arg) + return name if first else f"({name})" + if arg in applied: + raise nx.NetworkXError(f"argument {arg} is specified multiple times") + applied.add(arg) + if arg in sig.names: + return sig.names[arg] + elif isinstance(arg, str): + if sig.kwargs is None: + raise nx.NetworkXError( + f"name {arg} is not a named parameter and this function doesn't have kwargs" + ) + return f"{sig.kwargs}[{arg!r}]" + else: + if sig.args is None: + raise nx.NetworkXError( + f"index {arg} not a parameter index and this function doesn't have args" + ) + mutable_args = True + return f"{sig.args}[{arg - sig.n_positional}]" + + if self._finally: + # here's where we handle try_finally decorators. Such a decorator + # returns a mapped argument and a function to be called in a + # finally block. This feature was required by the open_file + # decorator. The below generates the code + # + # name, final = func(name) #<--append to mapblock + # try: #<--append to mapblock + # ... more argmapping and try blocks + # return WRAPPED_FUNCTION(...) + # ... more finally blocks + # finally: #<--prepend to finallys + # final() #<--prepend to finallys + # + for a in self._args: + name = get_name(a) + final = self._name(name) + mapblock.append(f"{name}, {final} = {fname}({name})") + mapblock.append("try:") + finallys = ["finally:", f"{final}()#", "#", finallys] + else: + mapblock.extend( + f"{name} = {fname}({name})" for name in map(get_name, self._args) + ) + + return sig, wrapped_name, functions, mapblock, finallys, mutable_args + + @classmethod + def signature(cls, f): + r"""Construct a Signature object describing `f` + + Compute a Signature so that we can write a function wrapping f with + the same signature and call-type. 
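+
+        For example (a sketch of the generated strings; the mangled-name suffix
+        varies per session), ``def f(a, b=1, *args, c=True, **kw)`` yields
+        roughly ``def_sig == "def argmap_f_1(a, b, *args, c, **kw):"`` and
+        ``call_sig == "return {}(a, b, *args, c = c, **kw)"``.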
+
+        Parameters
+        ----------
+        f : callable
+            A function to be decorated
+
+        Returns
+        -------
+        sig : argmap.Signature
+            The Signature of f
+
+        Notes
+        -----
+        The Signature is a namedtuple with names:
+
+            name : a unique version of the name of the decorated function
+            signature : the inspect.signature of the decorated function
+            def_sig : a string used as code to define the new function
+            call_sig : a string used as code to call the decorated function
+            names : a dict keyed by argument name and index to the argument's name
+            n_positional : the number of positional arguments in the signature
+            args : the name of the VAR_POSITIONAL argument if any, i.e. \*theseargs
+            kwargs : the name of the VAR_KEYWORDS argument if any, i.e. \*\*kwargs
+
+        These named attributes of the signature are used in `assemble` and `compile`
+        to construct a string of source code for the decorated function.
+
+        """
+        sig = inspect.signature(f, follow_wrapped=False)
+        def_sig = []
+        call_sig = []
+        names = {}
+
+        kind = None
+        args = None
+        kwargs = None
+        npos = 0
+        for i, param in enumerate(sig.parameters.values()):
+            # parameters can be position-only, keyword-or-position, keyword-only
+            # in any combination, but only in the order as above. we do edge
+            # detection to add the appropriate punctuation
+            prev = kind
+            kind = param.kind
+            if prev == param.POSITIONAL_ONLY != kind:
+                # the last token was position-only, but this one isn't
+                def_sig.append("/")
+            if prev != param.VAR_POSITIONAL and prev != param.KEYWORD_ONLY == kind:
+                # param is the first keyword-only arg and isn't starred;
+                # the VAR_POSITIONAL guard avoids emitting a bare "*" right
+                # after "*args", which would be a syntax error
+                def_sig.append("*")
+
+            # star arguments as appropriate
+            if kind == param.VAR_POSITIONAL:
+                name = "*" + param.name
+                args = param.name
+                count = 0
+            elif kind == param.VAR_KEYWORD:
+                name = "**" + param.name
+                kwargs = param.name
+                count = 0
+            else:
+                names[i] = names[param.name] = param.name
+                name = param.name
+                count = 1
+
+            # assign to keyword-only args in the function call
+            if kind == param.KEYWORD_ONLY:
+                call_sig.append(f"{name} = {name}")
+            else:
+                npos += count
+                call_sig.append(name)
+
+            def_sig.append(name)
+
+        fname = cls._name(f)
+        def_sig = f'def {fname}({", ".join(def_sig)}):'
+
+        call_sig = f"return {{}}({', '.join(call_sig)})"
+
+        return cls.Signature(fname, sig, def_sig, call_sig, names, npos, args, kwargs)
+
+    Signature = collections.namedtuple(
+        "Signature",
+        [
+            "name",
+            "signature",
+            "def_sig",
+            "call_sig",
+            "names",
+            "n_positional",
+            "args",
+            "kwargs",
+        ],
+    )
+
+    @staticmethod
+    def _flatten(nestlist, visited):
+        """flattens a recursive list of lists that doesn't have cyclic references
+
+        Parameters
+        ----------
+        nestlist : iterable
+            A recursive list of objects to be flattened into a single iterable
+
+        visited : set
+            A set of object ids which have been walked -- initialize with an
+            empty set
+
+        Yields
+        ------
+        Non-list objects contained in nestlist
+
+        """
+        for thing in nestlist:
+            if isinstance(thing, list):
+                if id(thing) in visited:
+                    raise ValueError("A cycle was found in nestlist. Be a tree.")
+                else:
+                    visited.add(id(thing))
+                yield from argmap._flatten(thing, visited)
+            else:
+                yield thing
+
+    _tabs = " " * 64
+
+    @staticmethod
+    def _indent(*lines):
+        """Indent list of code lines to make executable Python code
+
+        Indents a tree-recursive list of strings, following the rule that one
+        space is added to the tab after a line that ends in a colon, and one is
+        removed after a line that ends in a hashmark.
+ + Parameters + ---------- + *lines : lists and/or strings + A recursive list of strings to be assembled into properly indented + code. + + Returns + ------- + code : str + + Examples + -------- + + argmap._indent(*["try:", "try:", "pass#", "finally:", "pass#", "#", + "finally:", "pass#"]) + + renders to + + '''try: + try: + pass# + finally: + pass# + # + finally: + pass#''' + """ + depth = 0 + for line in argmap._flatten(lines, set()): + yield f"{argmap._tabs[:depth]}{line}" + depth += (line[-1:] == ":") - (line[-1:] == "#") + + +# Vendored in from https://github.com/scikit-learn/scikit-learn/blob/8ed0270b99344cee9bb253cbfa1d986561ea6cd7/sklearn/utils/validation.py#L37C1-L90C44 +def deprecate_positional_args(func=None, *, version): + """Decorator for methods that issues warnings for positional arguments. + + Using the keyword-only argument syntax in pep 3102, arguments after the + * will issue a warning when passed as a positional argument. + + Parameters + ---------- + func : callable, default=None + Function to check arguments on. + version : callable, default="1.3" + The version when positional arguments will result in error. + """ + + def _inner_deprecate_positional_args(f): + sig = signature(f) + kwonly_args = [] + all_args = [] + + for name, param in sig.parameters.items(): + if param.kind == Parameter.POSITIONAL_OR_KEYWORD: + all_args.append(name) + elif param.kind == Parameter.KEYWORD_ONLY: + kwonly_args.append(name) + + @wraps(f) + def inner_f(*args, **kwargs): + extra_args = len(args) - len(all_args) + if extra_args <= 0: + return f(*args, **kwargs) + + # extra_args > 0 + args_msg = [ + f"{name}={arg}" + for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:]) + ] + args_msg = ", ".join(args_msg) + warnings.warn( + ( + f"Pass {args_msg} as keyword args. From NetworkX version " + f"{version} passing these as positional arguments " + "will result in an error" + ), + FutureWarning, + ) + kwargs.update(zip(sig.parameters, args)) + return f(**kwargs) + + return inner_f + + if func is not None: + return _inner_deprecate_positional_args(func) + + return _inner_deprecate_positional_args diff --git a/phivenv/Lib/site-packages/networkx/utils/heaps.py b/phivenv/Lib/site-packages/networkx/utils/heaps.py new file mode 100644 index 0000000000000000000000000000000000000000..3db27906314924380a8a87f2dfd3a81292ffbb9f --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/heaps.py @@ -0,0 +1,340 @@ +""" +Min-heaps. +""" + +from heapq import heappop, heappush +from itertools import count + +import networkx as nx + +__all__ = ["MinHeap", "PairingHeap", "BinaryHeap"] + + +class MinHeap: + """Base class for min-heaps. + + A MinHeap stores a collection of key-value pairs ordered by their values. + It supports querying the minimum pair, inserting a new pair, decreasing the + value in an existing pair and deleting the minimum pair. + """ + + class _Item: + """Used by subclassess to represent a key-value pair.""" + + __slots__ = ("key", "value") + + def __init__(self, key, value): + self.key = key + self.value = value + + def __repr__(self): + return repr((self.key, self.value)) + + def __init__(self): + """Initialize a new min-heap.""" + self._dict = {} + + def min(self): + """Query the minimum key-value pair. + + Returns + ------- + key, value : tuple + The key-value pair with the minimum value in the heap. + + Raises + ------ + NetworkXError + If the heap is empty. + """ + raise NotImplementedError + + def pop(self): + """Delete the minimum pair in the heap. 
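# Illustrative usage sketch (editorial addition, not part of the vendored
# networkx source): behavior of the deprecate_positional_args decorator
# defined above. `clip` is a hypothetical function.
import warnings

from networkx.utils.decorators import deprecate_positional_args

@deprecate_positional_args(version="3.4")
def clip(value, *, lo=0, hi=1):
    return max(lo, min(hi, value))

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert clip(5, 0, 10) == 5  # lo and hi passed positionally -> warns
assert any(w.category is FutureWarning for w in caught)
assert clip(5, lo=0, hi=10) == 5  # keyword form emits no warning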
+ + Returns + ------- + key, value : tuple + The key-value pair with the minimum value in the heap. + + Raises + ------ + NetworkXError + If the heap is empty. + """ + raise NotImplementedError + + def get(self, key, default=None): + """Returns the value associated with a key. + + Parameters + ---------- + key : hashable object + The key to be looked up. + + default : object + Default value to return if the key is not present in the heap. + Default value: None. + + Returns + ------- + value : object. + The value associated with the key. + """ + raise NotImplementedError + + def insert(self, key, value, allow_increase=False): + """Insert a new key-value pair or modify the value in an existing + pair. + + Parameters + ---------- + key : hashable object + The key. + + value : object comparable with existing values. + The value. + + allow_increase : bool + Whether the value is allowed to increase. If False, attempts to + increase an existing value have no effect. Default value: False. + + Returns + ------- + decreased : bool + True if a pair is inserted or the existing value is decreased. + """ + raise NotImplementedError + + def __nonzero__(self): + """Returns whether the heap if empty.""" + return bool(self._dict) + + def __bool__(self): + """Returns whether the heap if empty.""" + return bool(self._dict) + + def __len__(self): + """Returns the number of key-value pairs in the heap.""" + return len(self._dict) + + def __contains__(self, key): + """Returns whether a key exists in the heap. + + Parameters + ---------- + key : any hashable object. + The key to be looked up. + """ + return key in self._dict + + +class PairingHeap(MinHeap): + """A pairing heap.""" + + class _Node(MinHeap._Item): + """A node in a pairing heap. + + A tree in a pairing heap is stored using the left-child, right-sibling + representation. + """ + + __slots__ = ("left", "next", "prev", "parent") + + def __init__(self, key, value): + super().__init__(key, value) + # The leftmost child. + self.left = None + # The next sibling. + self.next = None + # The previous sibling. + self.prev = None + # The parent. + self.parent = None + + def __init__(self): + """Initialize a pairing heap.""" + super().__init__() + self._root = None + + def min(self): + if self._root is None: + raise nx.NetworkXError("heap is empty.") + return (self._root.key, self._root.value) + + def pop(self): + if self._root is None: + raise nx.NetworkXError("heap is empty.") + min_node = self._root + self._root = self._merge_children(self._root) + del self._dict[min_node.key] + return (min_node.key, min_node.value) + + def get(self, key, default=None): + node = self._dict.get(key) + return node.value if node is not None else default + + def insert(self, key, value, allow_increase=False): + node = self._dict.get(key) + root = self._root + if node is not None: + if value < node.value: + node.value = value + if node is not root and value < node.parent.value: + self._cut(node) + self._root = self._link(root, node) + return True + elif allow_increase and value > node.value: + node.value = value + child = self._merge_children(node) + # Nonstandard step: Link the merged subtree with the root. See + # below for the standard step. + if child is not None: + self._root = self._link(self._root, child) + # Standard step: Perform a decrease followed by a pop as if the + # value were the smallest in the heap. Then insert the new + # value into the heap. 
+ # if node is not root: + # self._cut(node) + # if child is not None: + # root = self._link(root, child) + # self._root = self._link(root, node) + # else: + # self._root = (self._link(node, child) + # if child is not None else node) + return False + else: + # Insert a new key. + node = self._Node(key, value) + self._dict[key] = node + self._root = self._link(root, node) if root is not None else node + return True + + def _link(self, root, other): + """Link two nodes, making the one with the smaller value the parent of + the other. + """ + if other.value < root.value: + root, other = other, root + next = root.left + other.next = next + if next is not None: + next.prev = other + other.prev = None + root.left = other + other.parent = root + return root + + def _merge_children(self, root): + """Merge the subtrees of the root using the standard two-pass method. + The resulting subtree is detached from the root. + """ + node = root.left + root.left = None + if node is not None: + link = self._link + # Pass 1: Merge pairs of consecutive subtrees from left to right. + # At the end of the pass, only the prev pointers of the resulting + # subtrees have meaningful values. The other pointers will be fixed + # in pass 2. + prev = None + while True: + next = node.next + if next is None: + node.prev = prev + break + next_next = next.next + node = link(node, next) + node.prev = prev + prev = node + if next_next is None: + break + node = next_next + # Pass 2: Successively merge the subtrees produced by pass 1 from + # right to left with the rightmost one. + prev = node.prev + while prev is not None: + prev_prev = prev.prev + node = link(prev, node) + prev = prev_prev + # Now node can become the new root. Its has no parent nor siblings. + node.prev = None + node.next = None + node.parent = None + return node + + def _cut(self, node): + """Cut a node from its parent.""" + prev = node.prev + next = node.next + if prev is not None: + prev.next = next + else: + node.parent.left = next + node.prev = None + if next is not None: + next.prev = prev + node.next = None + node.parent = None + + +class BinaryHeap(MinHeap): + """A binary heap.""" + + def __init__(self): + """Initialize a binary heap.""" + super().__init__() + self._heap = [] + self._count = count() + + def min(self): + dict = self._dict + if not dict: + raise nx.NetworkXError("heap is empty") + heap = self._heap + pop = heappop + # Repeatedly remove stale key-value pairs until a up-to-date one is + # met. + while True: + value, _, key = heap[0] + if key in dict and value == dict[key]: + break + pop(heap) + return (key, value) + + def pop(self): + dict = self._dict + if not dict: + raise nx.NetworkXError("heap is empty") + heap = self._heap + pop = heappop + # Repeatedly remove stale key-value pairs until a up-to-date one is + # met. + while True: + value, _, key = heap[0] + pop(heap) + if key in dict and value == dict[key]: + break + del dict[key] + return (key, value) + + def get(self, key, default=None): + return self._dict.get(key, default) + + def insert(self, key, value, allow_increase=False): + dict = self._dict + if key in dict: + old_value = dict[key] + if value < old_value or (allow_increase and value > old_value): + # Since there is no way to efficiently obtain the location of a + # key-value pair in the heap, insert a new pair even if ones + # with the same key may already be present. Deem the old ones + # as stale and skip them when the minimum pair is queried. 
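# Illustrative usage sketch (editorial addition, not part of the vendored
# networkx source): the lazy-deletion strategy described in the comment
# above, using the BinaryHeap defined in this file.
from networkx.utils.heaps import BinaryHeap

h = BinaryHeap()
assert h.insert("a", 5)     # new key
assert h.insert("a", 2)     # decrease: the old (5, ..., "a") entry goes stale
assert h.pop() == ("a", 2)  # min()/pop() skip entries whose value no longer
                            # matches the dict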
+ dict[key] = value + heappush(self._heap, (value, next(self._count), key)) + return value < old_value + return False + else: + dict[key] = value + heappush(self._heap, (value, next(self._count), key)) + return True diff --git a/phivenv/Lib/site-packages/networkx/utils/mapped_queue.py b/phivenv/Lib/site-packages/networkx/utils/mapped_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..5dd8590764521d4fab179208f46a2c4f1fd28e58 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/mapped_queue.py @@ -0,0 +1,298 @@ +"""Priority queue class with updatable priorities. +""" + +import heapq + +__all__ = ["MappedQueue"] + + +class _HeapElement: + """This proxy class separates the heap element from its priority. + + The idea is that using a 2-tuple (priority, element) works + for sorting, but not for dict lookup because priorities are + often floating point values so round-off can mess up equality. + + So, we need inequalities to look at the priority (for sorting) + and equality (and hash) to look at the element to enable + updates to the priority. + + Unfortunately, this class can be tricky to work with if you forget that + `__lt__` compares the priority while `__eq__` compares the element. + In `greedy_modularity_communities()` the following code is + used to check that two _HeapElements differ in either element or priority: + + if d_oldmax != row_max or d_oldmax.priority != row_max.priority: + + If the priorities are the same, this implementation uses the element + as a tiebreaker. This provides compatibility with older systems that + use tuples to combine priority and elements. + """ + + __slots__ = ["priority", "element", "_hash"] + + def __init__(self, priority, element): + self.priority = priority + self.element = element + self._hash = hash(element) + + def __lt__(self, other): + try: + other_priority = other.priority + except AttributeError: + return self.priority < other + # assume comparing to another _HeapElement + if self.priority == other_priority: + try: + return self.element < other.element + except TypeError as err: + raise TypeError( + "Consider using a tuple, with a priority value that can be compared." + ) + return self.priority < other_priority + + def __gt__(self, other): + try: + other_priority = other.priority + except AttributeError: + return self.priority > other + # assume comparing to another _HeapElement + if self.priority == other_priority: + try: + return self.element > other.element + except TypeError as err: + raise TypeError( + "Consider using a tuple, with a priority value that can be compared." + ) + return self.priority > other_priority + + def __eq__(self, other): + try: + return self.element == other.element + except AttributeError: + return self.element == other + + def __hash__(self): + return self._hash + + def __getitem__(self, indx): + return self.priority if indx == 0 else self.element[indx - 1] + + def __iter__(self): + yield self.priority + try: + yield from self.element + except TypeError: + yield self.element + + def __repr__(self): + return f"_HeapElement({self.priority}, {self.element})" + + +class MappedQueue: + """The MappedQueue class implements a min-heap with removal and update-priority. + + The min heap uses heapq as well as custom written _siftup and _siftdown + methods to allow the heap positions to be tracked by an additional dict + keyed by element to position. 
The smallest element can be read in O(1) time and popped in O(log n) time, + new elements can be pushed in O(log n) time, and any element can be removed + or updated in O(log n) time. The queue cannot contain duplicate elements + and an attempt to push an element already in the queue will have no effect. + + MappedQueue complements the heapq package from the Python standard + library. While MappedQueue is designed for maximum compatibility with + heapq, it adds element removal, lookup, and priority update. + + Parameters + ---------- + data : dict or iterable + + Examples + -------- + + A `MappedQueue` can be created empty, or optionally, given a dictionary + of initial elements and priorities. The methods `push`, `pop`, + `remove`, and `update` operate on the queue. + + >>> colors_nm = {'red': 665, 'blue': 470, 'green': 550} + >>> q = MappedQueue(colors_nm) + >>> q.remove('red') + >>> q.update('green', 'violet', 400) + >>> q.push('indigo', 425) + True + >>> [q.pop().element for i in range(len(q.heap))] + ['violet', 'indigo', 'blue'] + + A `MappedQueue` can also be initialized with a list or other iterable. The + priority is assumed to be the sort order of the items in the list. + + >>> q = MappedQueue([916, 50, 4609, 493, 237]) + >>> q.remove(493) + >>> q.update(237, 1117) + >>> [q.pop() for i in range(len(q.heap))] + [50, 916, 1117, 4609] + + An exception is raised if the elements are not comparable. + + >>> q = MappedQueue([100, 'a']) + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'int' and 'str' + + To avoid the exception, use a dictionary to assign priorities to the elements. + + >>> q = MappedQueue({100: 0, 'a': 1}) + + References + ---------- + .. [1] Cormen, T. H., Leiserson, C. E., Rivest, R. L., & Stein, C. (2001). + Introduction to Algorithms, second edition. MIT Press. + .. [2] Knuth, D. E. (1997). The art of computer programming (Vol. 3). + Pearson Education.
+ """ + + def __init__(self, data=None): + """Priority queue class with updatable priorities.""" + if data is None: + self.heap = [] + elif isinstance(data, dict): + self.heap = [_HeapElement(v, k) for k, v in data.items()] + else: + self.heap = list(data) + self.position = {} + self._heapify() + + def _heapify(self): + """Restore heap invariant and recalculate map.""" + heapq.heapify(self.heap) + self.position = {elt: pos for pos, elt in enumerate(self.heap)} + if len(self.heap) != len(self.position): + raise AssertionError("Heap contains duplicate elements") + + def __len__(self): + return len(self.heap) + + def push(self, elt, priority=None): + """Add an element to the queue.""" + if priority is not None: + elt = _HeapElement(priority, elt) + # If element is already in queue, do nothing + if elt in self.position: + return False + # Add element to heap and dict + pos = len(self.heap) + self.heap.append(elt) + self.position[elt] = pos + # Restore invariant by sifting down + self._siftdown(0, pos) + return True + + def pop(self): + """Remove and return the smallest element in the queue.""" + # Remove smallest element + elt = self.heap[0] + del self.position[elt] + # If elt is last item, remove and return + if len(self.heap) == 1: + self.heap.pop() + return elt + # Replace root with last element + last = self.heap.pop() + self.heap[0] = last + self.position[last] = 0 + # Restore invariant by sifting up + self._siftup(0) + # Return smallest element + return elt + + def update(self, elt, new, priority=None): + """Replace an element in the queue with a new one.""" + if priority is not None: + new = _HeapElement(priority, new) + # Replace + pos = self.position[elt] + self.heap[pos] = new + del self.position[elt] + self.position[new] = pos + # Restore invariant by sifting up + self._siftup(pos) + + def remove(self, elt): + """Remove an element from the queue.""" + # Find and remove element + try: + pos = self.position[elt] + del self.position[elt] + except KeyError: + # Not in queue + raise + # If elt is last item, remove and return + if pos == len(self.heap) - 1: + self.heap.pop() + return + # Replace elt with last element + last = self.heap.pop() + self.heap[pos] = last + self.position[last] = pos + # Restore invariant by sifting up + self._siftup(pos) + + def _siftup(self, pos): + """Move smaller child up until hitting a leaf. + + Built to mimic code for heapq._siftup + only updating position dict too. + """ + heap, position = self.heap, self.position + end_pos = len(heap) + startpos = pos + newitem = heap[pos] + # Shift up the smaller child until hitting a leaf + child_pos = (pos << 1) + 1 # start with leftmost child position + while child_pos < end_pos: + # Set child_pos to index of smaller child. + child = heap[child_pos] + right_pos = child_pos + 1 + if right_pos < end_pos: + right = heap[right_pos] + if not child < right: + child = right + child_pos = right_pos + # Move the smaller child up. + heap[pos] = child + position[child] = pos + pos = child_pos + child_pos = (pos << 1) + 1 + # pos is a leaf position. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + while pos > 0: + parent_pos = (pos - 1) >> 1 + parent = heap[parent_pos] + if not newitem < parent: + break + heap[pos] = parent + position[parent] = pos + pos = parent_pos + heap[pos] = newitem + position[newitem] = pos + + def _siftdown(self, start_pos, pos): + """Restore invariant. keep swapping with parent until smaller. 
+ + Built to mimic code for heapq._siftdown + only updating position dict too. + """ + heap, position = self.heap, self.position + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > start_pos: + parent_pos = (pos - 1) >> 1 + parent = heap[parent_pos] + if not newitem < parent: + break + heap[pos] = parent + position[parent] = pos + pos = parent_pos + heap[pos] = newitem + position[newitem] = pos diff --git a/phivenv/Lib/site-packages/networkx/utils/misc.py b/phivenv/Lib/site-packages/networkx/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..31189bf2b574de85962acabc828d3ba3054dea53 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/misc.py @@ -0,0 +1,491 @@ +""" +Miscellaneous Helpers for NetworkX. + +These are not imported into the base networkx namespace but +can be accessed, for example, as + +>>> import networkx +>>> networkx.utils.make_list_of_ints({1, 2, 3}) +[1, 2, 3] +>>> networkx.utils.arbitrary_element({5, 1, 7}) # doctest: +SKIP +1 +""" + +import sys +import uuid +import warnings +from collections import defaultdict, deque +from collections.abc import Iterable, Iterator, Sized +from itertools import chain, tee + +import networkx as nx + +__all__ = [ + "flatten", + "make_list_of_ints", + "dict_to_numpy_array", + "arbitrary_element", + "pairwise", + "groups", + "create_random_state", + "create_py_random_state", + "PythonRandomInterface", + "nodes_equal", + "edges_equal", + "graphs_equal", +] + + +# some cookbook stuff +# used in deciding whether something is a bunch of nodes, edges, etc. +# see G.add_nodes and others in Graph Class in networkx/base.py + + +def flatten(obj, result=None): + """Return flattened version of (possibly nested) iterable object.""" + if not isinstance(obj, (Iterable, Sized)) or isinstance(obj, str): + return obj + if result is None: + result = [] + for item in obj: + if not isinstance(item, (Iterable, Sized)) or isinstance(item, str): + result.append(item) + else: + flatten(item, result) + return tuple(result) + + +def make_list_of_ints(sequence): + """Return list of ints from sequence of integral numbers. + + All elements of the sequence must satisfy int(element) == element + or a ValueError is raised. Sequence is iterated through once. + + If sequence is a list, the non-int values are replaced with ints. + So, no new list is created + """ + if not isinstance(sequence, list): + result = [] + for i in sequence: + errmsg = f"sequence is not all integers: {i}" + try: + ii = int(i) + except ValueError: + raise nx.NetworkXError(errmsg) from None + if ii != i: + raise nx.NetworkXError(errmsg) + result.append(ii) + return result + # original sequence is a list... in-place conversion to ints + for indx, i in enumerate(sequence): + errmsg = f"sequence is not all integers: {i}" + if isinstance(i, int): + continue + try: + ii = int(i) + except ValueError: + raise nx.NetworkXError(errmsg) from None + if ii != i: + raise nx.NetworkXError(errmsg) + sequence[indx] = ii + return sequence + + +def dict_to_numpy_array(d, mapping=None): + """Convert a dictionary of dictionaries to a numpy array + with optional mapping.""" + try: + return _dict_to_numpy_array2(d, mapping) + except (AttributeError, TypeError): + # AttributeError is when no mapping was provided and v.keys() fails. + # TypeError is when a mapping was provided and d[k1][k2] fails. 
+ return _dict_to_numpy_array1(d, mapping) + + +def _dict_to_numpy_array2(d, mapping=None): + """Convert a dictionary of dictionaries to a 2d numpy array + with optional mapping. + + """ + import numpy as np + + if mapping is None: + s = set(d.keys()) + for k, v in d.items(): + s.update(v.keys()) + mapping = dict(zip(s, range(len(s)))) + n = len(mapping) + a = np.zeros((n, n)) + for k1, i in mapping.items(): + for k2, j in mapping.items(): + try: + a[i, j] = d[k1][k2] + except KeyError: + pass + return a + + +def _dict_to_numpy_array1(d, mapping=None): + """Convert a dictionary of numbers to a 1d numpy array with optional mapping.""" + import numpy as np + + if mapping is None: + s = set(d.keys()) + mapping = dict(zip(s, range(len(s)))) + n = len(mapping) + a = np.zeros(n) + for k1, i in mapping.items(): + i = mapping[k1] + a[i] = d[k1] + return a + + +def arbitrary_element(iterable): + """Returns an arbitrary element of `iterable` without removing it. + + This is most useful for "peeking" at an arbitrary element of a set, + but can be used for any list, dictionary, etc., as well. + + Parameters + ---------- + iterable : `abc.collections.Iterable` instance + Any object that implements ``__iter__``, e.g. set, dict, list, tuple, + etc. + + Returns + ------- + The object that results from ``next(iter(iterable))`` + + Raises + ------ + ValueError + If `iterable` is an iterator (because the current implementation of + this function would consume an element from the iterator). + + Examples + -------- + Arbitrary elements from common Iterable objects: + + >>> nx.utils.arbitrary_element([1, 2, 3]) # list + 1 + >>> nx.utils.arbitrary_element((1, 2, 3)) # tuple + 1 + >>> nx.utils.arbitrary_element({1, 2, 3}) # set + 1 + >>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])} + >>> nx.utils.arbitrary_element(d) # dict_keys + 1 + >>> nx.utils.arbitrary_element(d.values()) # dict values + 3 + + `str` is also an Iterable: + + >>> nx.utils.arbitrary_element("hello") + 'h' + + :exc:`ValueError` is raised if `iterable` is an iterator: + + >>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable + >>> nx.utils.arbitrary_element(iterator) + Traceback (most recent call last): + ... + ValueError: cannot return an arbitrary item from an iterator + + Notes + ----- + This function does not return a *random* element. If `iterable` is + ordered, sequential calls will return the same value:: + + >>> l = [1, 2, 3] + >>> nx.utils.arbitrary_element(l) + 1 + >>> nx.utils.arbitrary_element(l) + 1 + + """ + if isinstance(iterable, Iterator): + raise ValueError("cannot return an arbitrary item from an iterator") + # Another possible implementation is ``for x in iterable: return x``. + return next(iter(iterable)) + + +# Recipe from the itertools documentation. +def pairwise(iterable, cyclic=False): + "s -> (s0, s1), (s1, s2), (s2, s3), ..." + a, b = tee(iterable) + first = next(b, None) + if cyclic is True: + return zip(a, chain(b, (first,))) + return zip(a, b) + + +def groups(many_to_one): + """Converts a many-to-one mapping into a one-to-many mapping. + + `many_to_one` must be a dictionary whose keys and values are all + :term:`hashable`. + + The return value is a dictionary mapping values from `many_to_one` + to sets of keys from `many_to_one` that have that value. 
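# Illustrative sketch (editorial addition, not part of the vendored networkx
# source): the try/except dispatch above picks the 2d or 1d conversion
# automatically.
from networkx.utils.misc import dict_to_numpy_array

dod = {"u": {"u": 0, "v": 1}, "v": {"u": 1, "v": 0}}
A = dict_to_numpy_array(dod, mapping={"u": 0, "v": 1})
assert A.tolist() == [[0.0, 1.0], [1.0, 0.0]]  # dict of dicts -> 2d array

flat = {"u": 7, "v": 9}
a = dict_to_numpy_array(flat, mapping={"u": 0, "v": 1})
assert a.tolist() == [7.0, 9.0]  # flat dict -> 1d array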
+ + Examples + -------- + >>> from networkx.utils import groups + >>> many_to_one = {"a": 1, "b": 1, "c": 2, "d": 3, "e": 3} + >>> groups(many_to_one) # doctest: +SKIP + {1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}} + """ + one_to_many = defaultdict(set) + for v, k in many_to_one.items(): + one_to_many[k].add(v) + return dict(one_to_many) + + +def create_random_state(random_state=None): + """Returns a numpy.random.RandomState or numpy.random.Generator instance + depending on input. + + Parameters + ---------- + random_state : int or NumPy RandomState or Generator instance, optional (default=None) + If int, return a numpy.random.RandomState instance set with seed=int. + if `numpy.random.RandomState` instance, return it. + if `numpy.random.Generator` instance, return it. + if None or numpy.random, return the global random number generator used + by numpy.random. + """ + import numpy as np + + if random_state is None or random_state is np.random: + return np.random.mtrand._rand + if isinstance(random_state, np.random.RandomState): + return random_state + if isinstance(random_state, int): + return np.random.RandomState(random_state) + if isinstance(random_state, np.random.Generator): + return random_state + msg = ( + f"{random_state} cannot be used to create a numpy.random.RandomState or\n" + "numpy.random.Generator instance" + ) + raise ValueError(msg) + + +class PythonRandomInterface: + def __init__(self, rng=None): + try: + import numpy as np + except ImportError: + msg = "numpy not found, only random.random available." + warnings.warn(msg, ImportWarning) + + if rng is None: + self._rng = np.random.mtrand._rand + else: + self._rng = rng + + def random(self): + return self._rng.random() + + def uniform(self, a, b): + return a + (b - a) * self._rng.random() + + def randrange(self, a, b=None): + import numpy as np + + if isinstance(self._rng, np.random.Generator): + return self._rng.integers(a, b) + return self._rng.randint(a, b) + + # NOTE: the numpy implementations of `choice` don't support strings, so + # this cannot be replaced with self._rng.choice + def choice(self, seq): + import numpy as np + + if isinstance(self._rng, np.random.Generator): + idx = self._rng.integers(0, len(seq)) + else: + idx = self._rng.randint(0, len(seq)) + return seq[idx] + + def gauss(self, mu, sigma): + return self._rng.normal(mu, sigma) + + def shuffle(self, seq): + return self._rng.shuffle(seq) + + # Some methods don't match API for numpy RandomState. + # Commented out versions are not used by NetworkX + + def sample(self, seq, k): + return self._rng.choice(list(seq), size=(k,), replace=False) + + def randint(self, a, b): + import numpy as np + + if isinstance(self._rng, np.random.Generator): + return self._rng.integers(a, b + 1) + return self._rng.randint(a, b + 1) + + # exponential as expovariate with 1/argument, + def expovariate(self, scale): + return self._rng.exponential(1 / scale) + + # pareto as paretovariate with 1/argument, + def paretovariate(self, shape): + return self._rng.pareto(shape) + + +# weibull as weibullvariate multiplied by beta, +# def weibullvariate(self, alpha, beta): +# return self._rng.weibull(alpha) * beta +# +# def triangular(self, low, high, mode): +# return self._rng.triangular(low, mode, high) +# +# def choices(self, seq, weights=None, cum_weights=None, k=1): +# return self._rng.choice(seq + + +def create_py_random_state(random_state=None): + """Returns a random.Random instance depending on input. 
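# Illustrative usage sketch (editorial addition, not part of the vendored
# networkx source): PythonRandomInterface gives numpy generators a
# random.Random-like surface; create_py_random_state (defined here) wraps
# numpy RNGs in it automatically.
import numpy as np

from networkx.utils.misc import PythonRandomInterface

rng = PythonRandomInterface(np.random.default_rng(42))
draws = [rng.randint(1, 6) for _ in range(5)]  # inclusive bounds, like random.randint
assert all(1 <= d <= 6 for d in draws)
assert rng.choice(["x", "y", "z"]) in {"x", "y", "z"}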
+ + Parameters + ---------- + random_state : int or random number generator or None (default=None) + If int, return a random.Random instance set with seed=int. + if random.Random instance, return it. + if None or the `random` package, return the global random number + generator used by `random`. + if np.random package, return the global numpy random number + generator wrapped in a PythonRandomInterface class. + if np.random.RandomState or np.random.Generator instance, return it + wrapped in PythonRandomInterface + if a PythonRandomInterface instance, return it + """ + import random + + try: + import numpy as np + + if random_state is np.random: + return PythonRandomInterface(np.random.mtrand._rand) + if isinstance(random_state, (np.random.RandomState, np.random.Generator)): + return PythonRandomInterface(random_state) + if isinstance(random_state, PythonRandomInterface): + return random_state + except ImportError: + pass + + if random_state is None or random_state is random: + return random._inst + if isinstance(random_state, random.Random): + return random_state + if isinstance(random_state, int): + return random.Random(random_state) + msg = f"{random_state} cannot be used to generate a random.Random instance" + raise ValueError(msg) + + +def nodes_equal(nodes1, nodes2): + """Check if nodes are equal. + + Equality here means equal as Python objects. + Node data must match if included. + The order of nodes is not relevant. + + Parameters + ---------- + nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples + + Returns + ------- + bool + True if nodes are equal, False otherwise. + """ + nlist1 = list(nodes1) + nlist2 = list(nodes2) + try: + d1 = dict(nlist1) + d2 = dict(nlist2) + except (ValueError, TypeError): + d1 = dict.fromkeys(nlist1) + d2 = dict.fromkeys(nlist2) + return d1 == d2 + + +def edges_equal(edges1, edges2): + """Check if edges are equal. + + Equality here means equal as Python objects. + Edge data must match if included. + The order of the edges is not relevant. + + Parameters + ---------- + edges1, edges2 : iterables of with u, v nodes as + edge tuples (u, v), or + edge tuples with data dicts (u, v, d), or + edge tuples with keys and data dicts (u, v, k, d) + + Returns + ------- + bool + True if edges are equal, False otherwise. + """ + from collections import defaultdict + + d1 = defaultdict(dict) + d2 = defaultdict(dict) + c1 = 0 + for c1, e in enumerate(edges1): + u, v = e[0], e[1] + data = [e[2:]] + if v in d1[u]: + data = d1[u][v] + data + d1[u][v] = data + d1[v][u] = data + c2 = 0 + for c2, e in enumerate(edges2): + u, v = e[0], e[1] + data = [e[2:]] + if v in d2[u]: + data = d2[u][v] + data + d2[u][v] = data + d2[v][u] = data + if c1 != c2: + return False + # can check one direction because lengths are the same. + for n, nbrdict in d1.items(): + for nbr, datalist in nbrdict.items(): + if n not in d2: + return False + if nbr not in d2[n]: + return False + d2datalist = d2[n][nbr] + for data in datalist: + if datalist.count(data) != d2datalist.count(data): + return False + return True + + +def graphs_equal(graph1, graph2): + """Check if graphs are equal. + + Equality here means equal as Python objects (not isomorphism). + Node, edge and graph data must match. + + Parameters + ---------- + graph1, graph2 : graph + + Returns + ------- + bool + True if graphs are equal, False otherwise. 
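# Illustrative sketch (editorial addition, not part of the vendored networkx
# source): edges_equal (defined above) ignores edge order and direction but
# compares attached data and multiplicity.
from networkx.utils.misc import edges_equal

assert edges_equal([(1, 2, {"w": 3})], [(2, 1, {"w": 3})])
assert not edges_equal([(1, 2, {"w": 3})], [(1, 2, {"w": 4})])
assert not edges_equal([(1, 2)], [(1, 2), (1, 2)])  # multiplicity matters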
+ """ + return ( + graph1.adj == graph2.adj + and graph1.nodes == graph2.nodes + and graph1.graph == graph2.graph + ) diff --git a/phivenv/Lib/site-packages/networkx/utils/random_sequence.py b/phivenv/Lib/site-packages/networkx/utils/random_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..20a7b5e0a7fcc426ed9840f8bed2abf500e357e5 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/random_sequence.py @@ -0,0 +1,164 @@ +""" +Utilities for generating random numbers, random sequences, and +random selections. +""" + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = [ + "powerlaw_sequence", + "zipf_rv", + "cumulative_distribution", + "discrete_sequence", + "random_weighted_sample", + "weighted_choice", +] + + +# The same helpers for choosing random sequences from distributions +# uses Python's random module +# https://docs.python.org/3/library/random.html + + +@py_random_state(2) +def powerlaw_sequence(n, exponent=2.0, seed=None): + """ + Return sample sequence of length n from a power law distribution. + """ + return [seed.paretovariate(exponent - 1) for i in range(n)] + + +@py_random_state(2) +def zipf_rv(alpha, xmin=1, seed=None): + r"""Returns a random value chosen from the Zipf distribution. + + The return value is an integer drawn from the probability distribution + + .. math:: + + p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})}, + + where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function. + + Parameters + ---------- + alpha : float + Exponent value of the distribution + xmin : int + Minimum value + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + x : int + Random value from Zipf distribution + + Raises + ------ + ValueError: + If xmin < 1 or + If alpha <= 1 + + Notes + ----- + The rejection algorithm generates random values for a the power-law + distribution in uniformly bounded expected time dependent on + parameters. See [1]_ for details on its operation. + + Examples + -------- + >>> nx.utils.zipf_rv(alpha=2, xmin=3, seed=42) + 8 + + References + ---------- + .. [1] Luc Devroye, Non-Uniform Random Variate Generation, + Springer-Verlag, New York, 1986. + """ + if xmin < 1: + raise ValueError("xmin < 1") + if alpha <= 1: + raise ValueError("a <= 1.0") + a1 = alpha - 1.0 + b = 2**a1 + while True: + u = 1.0 - seed.random() # u in (0,1] + v = seed.random() # v in [0,1) + x = int(xmin * u ** -(1.0 / a1)) + t = (1.0 + (1.0 / x)) ** a1 + if v * x * (t - 1.0) / (b - 1.0) <= t / b: + break + return x + + +def cumulative_distribution(distribution): + """Returns normalized cumulative distribution from discrete distribution.""" + + cdf = [0.0] + psum = sum(distribution) + for i in range(len(distribution)): + cdf.append(cdf[i] + distribution[i] / psum) + return cdf + + +@py_random_state(3) +def discrete_sequence(n, distribution=None, cdistribution=None, seed=None): + """ + Return sample sequence of length n from a given discrete distribution + or discrete cumulative distribution. + + One of the following must be specified. 
+ + distribution = histogram of values, will be normalized + + cdistribution = normalized discrete cumulative distribution + + """ + import bisect + + if cdistribution is not None: + cdf = cdistribution + elif distribution is not None: + cdf = cumulative_distribution(distribution) + else: + raise nx.NetworkXError( + "discrete_sequence: distribution or cdistribution missing" + ) + + # get a uniform random number + inputseq = [seed.random() for i in range(n)] + + # choose from CDF + seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq] + return seq + + +@py_random_state(2) +def random_weighted_sample(mapping, k, seed=None): + """Returns k items without replacement from a weighted sample. + + The input is a dictionary of items with weights as values. + """ + if k > len(mapping): + raise ValueError("sample larger than population") + sample = set() + while len(sample) < k: + sample.add(weighted_choice(mapping, seed)) + return list(sample) + + +@py_random_state(1) +def weighted_choice(mapping, seed=None): + """Returns a single element from a weighted sample. + + The input is a dictionary of items with weights as values. + """ + # use roulette method + rnd = seed.random() * sum(mapping.values()) + for k, w in mapping.items(): + rnd -= w + if rnd < 0: + return k diff --git a/phivenv/Lib/site-packages/networkx/utils/rcm.py b/phivenv/Lib/site-packages/networkx/utils/rcm.py new file mode 100644 index 0000000000000000000000000000000000000000..f9e1bfee69d785a7ecaa4800a75616cbd8ac399b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/rcm.py @@ -0,0 +1,158 @@ +""" +Cuthill-McKee ordering of graph nodes to produce sparse matrices +""" +from collections import deque +from operator import itemgetter + +import networkx as nx + +from ..utils import arbitrary_element + +__all__ = ["cuthill_mckee_ordering", "reverse_cuthill_mckee_ordering"] + + +def cuthill_mckee_ordering(G, heuristic=None): + """Generate an ordering (permutation) of the graph nodes to make + a sparse matrix. + + Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + heuristic : function, optional + Function to choose starting node for RCM algorithm. If None + a node from a pseudo-peripheral pair is used. A user-defined function + can be supplied that takes a graph object and returns a single node. + + Returns + ------- + nodes : generator + Generator of nodes in Cuthill-McKee ordering. + + Examples + -------- + >>> from networkx.utils import cuthill_mckee_ordering + >>> G = nx.path_graph(4) + >>> rcm = list(cuthill_mckee_ordering(G)) + >>> A = nx.adjacency_matrix(G, nodelist=rcm) + + Smallest degree node as heuristic function: + + >>> def smallest_degree(G): + ... return min(G, key=G.degree) + >>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree)) + + + See Also + -------- + reverse_cuthill_mckee_ordering + + Notes + ----- + The optimal solution the bandwidth reduction is NP-complete [2]_. + + + References + ---------- + .. [1] E. Cuthill and J. McKee. + Reducing the bandwidth of sparse symmetric matrices, + In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969. + http://doi.acm.org/10.1145/800195.805928 + .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual. + Springer-Verlag New York, Inc., New York, NY, USA. 
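# Illustrative sketch (editorial addition, not part of the vendored networkx
# source): drawing from a histogram with discrete_sequence (defined above);
# the seed is fixed for reproducibility.
from networkx.utils.random_sequence import cumulative_distribution, discrete_sequence

assert cumulative_distribution([1, 1, 2]) == [0.0, 0.25, 0.5, 1.0]
seq = discrete_sequence(5, distribution=[0, 1, 3], seed=42)
assert all(v in (1, 2) for v in seq)  # index 0 has zero weight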
+ """ + for c in nx.connected_components(G): + yield from connected_cuthill_mckee_ordering(G.subgraph(c), heuristic) + + +def reverse_cuthill_mckee_ordering(G, heuristic=None): + """Generate an ordering (permutation) of the graph nodes to make + a sparse matrix. + + Uses the reverse Cuthill-McKee heuristic (based on breadth-first search) + [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + heuristic : function, optional + Function to choose starting node for RCM algorithm. If None + a node from a pseudo-peripheral pair is used. A user-defined function + can be supplied that takes a graph object and returns a single node. + + Returns + ------- + nodes : generator + Generator of nodes in reverse Cuthill-McKee ordering. + + Examples + -------- + >>> from networkx.utils import reverse_cuthill_mckee_ordering + >>> G = nx.path_graph(4) + >>> rcm = list(reverse_cuthill_mckee_ordering(G)) + >>> A = nx.adjacency_matrix(G, nodelist=rcm) + + Smallest degree node as heuristic function: + + >>> def smallest_degree(G): + ... return min(G, key=G.degree) + >>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree)) + + + See Also + -------- + cuthill_mckee_ordering + + Notes + ----- + The optimal solution the bandwidth reduction is NP-complete [2]_. + + References + ---------- + .. [1] E. Cuthill and J. McKee. + Reducing the bandwidth of sparse symmetric matrices, + In Proc. 24th Nat. Conf. ACM, pages 157-72, 1969. + http://doi.acm.org/10.1145/800195.805928 + .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual. + Springer-Verlag New York, Inc., New York, NY, USA. + """ + return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic))) + + +def connected_cuthill_mckee_ordering(G, heuristic=None): + # the cuthill mckee algorithm for connected graphs + if heuristic is None: + start = pseudo_peripheral_node(G) + else: + start = heuristic(G) + visited = {start} + queue = deque([start]) + while queue: + parent = queue.popleft() + yield parent + nd = sorted(G.degree(set(G[parent]) - visited), key=itemgetter(1)) + children = [n for n, d in nd] + visited.update(children) + queue.extend(children) + + +def pseudo_peripheral_node(G): + # helper for cuthill-mckee to find a node in a "pseudo peripheral pair" + # to use as good starting node + u = arbitrary_element(G) + lp = 0 + v = u + while True: + spl = dict(nx.shortest_path_length(G, v)) + l = max(spl.values()) + if l <= lp: + break + lp = l + farthest = (n for n, dist in spl.items() if dist == l) + v, deg = min(G.degree(farthest), key=itemgetter(1)) + return v diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__init__.py b/phivenv/Lib/site-packages/networkx/utils/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..213dd99ec51e33605bbd288d4f43c2178c471953 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a703fc97146003c74e78368d316c13544e3cf5d9 Binary files /dev/null and 
b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test__init.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a9751ba346c25a0d3ed6b69c634dd657f75d19 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_decorators.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98609c344ef4dc439b8af22aa9891b98c4acea3c Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_heaps.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09f3831c40e5239837bc72e5340186a1cb5d48dd Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_mapped_queue.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a0182001d25fe38fda70d3d7f9f07afc1f50789 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_misc.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c44114288f70fd127fbe72f4df8ae1588693bc1b Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_random_sequence.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e04fe56a15cba5b2587148b224a863ec903c518e Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_rcm.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-39.pyc b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6062b8632d1d09e307e0fc43e1a503b47c6aa78 Binary files /dev/null and b/phivenv/Lib/site-packages/networkx/utils/tests/__pycache__/test_unionfind.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test__init.py b/phivenv/Lib/site-packages/networkx/utils/tests/test__init.py new file mode 100644 index 0000000000000000000000000000000000000000..ecbcce36df7cd37781dd45879f63f7d6f55e5567 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test__init.py @@ -0,0 +1,11 @@ +import pytest + + +def test_utils_namespace(): + """Ensure objects are not unintentionally exposed in utils namespace.""" + with 
pytest.raises(ImportError): + from networkx.utils import nx + with pytest.raises(ImportError): + from networkx.utils import sys + with pytest.raises(ImportError): + from networkx.utils import defaultdict, deque diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_decorators.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..f74dd9a0762ae4962c7c99271c4abdd63325ce4b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_decorators.py @@ -0,0 +1,491 @@ +import os +import pathlib +import random +import tempfile + +import pytest + +import networkx as nx +from networkx.utils.decorators import ( + argmap, + not_implemented_for, + np_random_state, + open_file, + py_random_state, +) +from networkx.utils.misc import PythonRandomInterface + + +def test_not_implemented_decorator(): + @not_implemented_for("directed") + def test_d(G): + pass + + test_d(nx.Graph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_d(nx.DiGraph()) + + @not_implemented_for("undirected") + def test_u(G): + pass + + test_u(nx.DiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_u(nx.Graph()) + + @not_implemented_for("multigraph") + def test_m(G): + pass + + test_m(nx.Graph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_m(nx.MultiGraph()) + + @not_implemented_for("graph") + def test_g(G): + pass + + test_g(nx.MultiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_g(nx.Graph()) + + # not MultiDiGraph (multiple arguments => AND) + @not_implemented_for("directed", "multigraph") + def test_not_md(G): + pass + + test_not_md(nx.Graph()) + test_not_md(nx.DiGraph()) + test_not_md(nx.MultiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_not_md(nx.MultiDiGraph()) + + # Graph only (multiple decorators => OR) + @not_implemented_for("directed") + @not_implemented_for("multigraph") + def test_graph_only(G): + pass + + test_graph_only(nx.Graph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_graph_only(nx.DiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_graph_only(nx.MultiGraph()) + with pytest.raises(nx.NetworkXNotImplemented): + test_graph_only(nx.MultiDiGraph()) + + with pytest.raises(ValueError): + not_implemented_for("directed", "undirected") + + with pytest.raises(ValueError): + not_implemented_for("multigraph", "graph") + + +def test_not_implemented_decorator_key(): + with pytest.raises(KeyError): + + @not_implemented_for("foo") + def test1(G): + pass + + test1(nx.Graph()) + + +def test_not_implemented_decorator_raise(): + with pytest.raises(nx.NetworkXNotImplemented): + + @not_implemented_for("graph") + def test1(G): + pass + + test1(nx.Graph()) + + +class TestOpenFileDecorator: + def setup_method(self): + self.text = ["Blah... 
", "BLAH ", "BLAH!!!!"] + self.fobj = tempfile.NamedTemporaryFile("wb+", delete=False) + self.name = self.fobj.name + + def teardown_method(self): + self.fobj.close() + os.unlink(self.name) + + def write(self, path): + for text in self.text: + path.write(text.encode("ascii")) + + @open_file(1, "r") + def read(self, path): + return path.readlines()[0] + + @staticmethod + @open_file(0, "wb") + def writer_arg0(path): + path.write(b"demo") + + @open_file(1, "wb+") + def writer_arg1(self, path): + self.write(path) + + @open_file(2, "wb") + def writer_arg2default(self, x, path=None): + if path is None: + with tempfile.NamedTemporaryFile("wb+") as fh: + self.write(fh) + else: + self.write(path) + + @open_file(4, "wb") + def writer_arg4default(self, x, y, other="hello", path=None, **kwargs): + if path is None: + with tempfile.NamedTemporaryFile("wb+") as fh: + self.write(fh) + else: + self.write(path) + + @open_file("path", "wb") + def writer_kwarg(self, **kwargs): + path = kwargs.get("path", None) + if path is None: + with tempfile.NamedTemporaryFile("wb+") as fh: + self.write(fh) + else: + self.write(path) + + def test_writer_arg0_str(self): + self.writer_arg0(self.name) + + def test_writer_arg0_fobj(self): + self.writer_arg0(self.fobj) + + def test_writer_arg0_pathlib(self): + self.writer_arg0(pathlib.Path(self.name)) + + def test_writer_arg1_str(self): + self.writer_arg1(self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg1_fobj(self): + self.writer_arg1(self.fobj) + assert not self.fobj.closed + self.fobj.close() + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg2default_str(self): + self.writer_arg2default(0, path=None) + self.writer_arg2default(0, path=self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg2default_fobj(self): + self.writer_arg2default(0, path=self.fobj) + assert not self.fobj.closed + self.fobj.close() + assert self.read(self.name) == "".join(self.text) + + def test_writer_arg2default_fobj_path_none(self): + self.writer_arg2default(0, path=None) + + def test_writer_arg4default_fobj(self): + self.writer_arg4default(0, 1, dog="dog", other="other") + self.writer_arg4default(0, 1, dog="dog", other="other", path=self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_kwarg_str(self): + self.writer_kwarg(path=self.name) + assert self.read(self.name) == "".join(self.text) + + def test_writer_kwarg_fobj(self): + self.writer_kwarg(path=self.fobj) + self.fobj.close() + assert self.read(self.name) == "".join(self.text) + + def test_writer_kwarg_path_none(self): + self.writer_kwarg(path=None) + + +class TestRandomState: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + @np_random_state(1) + def instantiate_np_random_state(self, random_state): + assert isinstance(random_state, np.random.RandomState) + return random_state.random_sample() + + @py_random_state(1) + def instantiate_py_random_state(self, random_state): + assert isinstance(random_state, (random.Random, PythonRandomInterface)) + return random_state.random() + + def test_random_state_None(self): + np.random.seed(42) + rv = np.random.random_sample() + np.random.seed(42) + assert rv == self.instantiate_np_random_state(None) + + random.seed(42) + rv = random.random() + random.seed(42) + assert rv == self.instantiate_py_random_state(None) + + def test_random_state_np_random(self): + np.random.seed(42) + rv = np.random.random_sample() + np.random.seed(42) + assert rv == 
self.instantiate_np_random_state(np.random) + np.random.seed(42) + assert rv == self.instantiate_py_random_state(np.random) + + def test_random_state_int(self): + np.random.seed(42) + np_rv = np.random.random_sample() + random.seed(42) + py_rv = random.random() + + np.random.seed(42) + seed = 1 + rval = self.instantiate_np_random_state(seed) + rval_expected = np.random.RandomState(seed).rand() + assert rval, rval_expected + # test that global seed wasn't changed in function + assert np_rv == np.random.random_sample() + + random.seed(42) + rval = self.instantiate_py_random_state(seed) + rval_expected = random.Random(seed).random() + assert rval, rval_expected + # test that global seed wasn't changed in function + assert py_rv == random.random() + + def test_random_state_np_random_RandomState(self): + np.random.seed(42) + np_rv = np.random.random_sample() + + np.random.seed(42) + seed = 1 + rng = np.random.RandomState(seed) + rval = self.instantiate_np_random_state(seed) + rval_expected = np.random.RandomState(seed).rand() + assert rval, rval_expected + + rval = self.instantiate_py_random_state(seed) + rval_expected = np.random.RandomState(seed).rand() + assert rval, rval_expected + # test that global seed wasn't changed in function + assert np_rv == np.random.random_sample() + + def test_random_state_py_random(self): + seed = 1 + rng = random.Random(seed) + rv = self.instantiate_py_random_state(rng) + assert rv, random.Random(seed).random() + + pytest.raises(ValueError, self.instantiate_np_random_state, rng) + + +def test_random_state_string_arg_index(): + with pytest.raises(nx.NetworkXError): + + @np_random_state("a") + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +def test_py_random_state_string_arg_index(): + with pytest.raises(nx.NetworkXError): + + @py_random_state("a") + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +def test_random_state_invalid_arg_index(): + with pytest.raises(nx.NetworkXError): + + @np_random_state(2) + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +def test_py_random_state_invalid_arg_index(): + with pytest.raises(nx.NetworkXError): + + @py_random_state(2) + def make_random_state(rs): + pass + + rstate = make_random_state(1) + + +class TestArgmap: + class ArgmapError(RuntimeError): + pass + + def test_trivial_function(self): + def do_not_call(x): + raise ArgmapError("do not call this function") + + @argmap(do_not_call) + def trivial_argmap(): + return 1 + + assert trivial_argmap() == 1 + + def test_trivial_iterator(self): + def do_not_call(x): + raise ArgmapError("do not call this function") + + @argmap(do_not_call) + def trivial_argmap(): + yield from (1, 2, 3) + + assert tuple(trivial_argmap()) == (1, 2, 3) + + def test_contextmanager(self): + container = [] + + def contextmanager(x): + nonlocal container + return x, lambda: container.append(x) + + @argmap(contextmanager, 0, 1, 2, try_finally=True) + def foo(x, y, z): + return x, y, z + + x, y, z = foo("a", "b", "c") + + # context exits are called in reverse + assert container == ["c", "b", "a"] + + def test_tryfinally_generator(self): + container = [] + + def singleton(x): + return (x,) + + with pytest.raises(nx.NetworkXError): + + @argmap(singleton, 0, 1, 2, try_finally=True) + def foo(x, y, z): + yield from (x, y, z) + + @argmap(singleton, 0, 1, 2) + def foo(x, y, z): + return x + y + z + + q = foo("a", "b", "c") + + assert q == ("a", "b", "c") + + def test_actual_vararg(self): + @argmap(lambda x: -x, 4) + def foo(x, y, *args): + 
return (x, y) + tuple(args) + + assert foo(1, 2, 3, 4, 5, 6) == (1, 2, 3, 4, -5, 6) + + def test_signature_destroying_intermediate_decorator(self): + def add_one_to_first_bad_decorator(f): + """Bad because it doesn't wrap the f signature (clobbers it)""" + + def decorated(a, *args, **kwargs): + return f(a + 1, *args, **kwargs) + + return decorated + + add_two_to_second = argmap(lambda b: b + 2, 1) + + @add_two_to_second + @add_one_to_first_bad_decorator + def add_one_and_two(a, b): + return a, b + + assert add_one_and_two(5, 5) == (6, 7) + + def test_actual_kwarg(self): + @argmap(lambda x: -x, "arg") + def foo(*, arg): + return arg + + assert foo(arg=3) == -3 + + def test_nested_tuple(self): + def xform(x, y): + u, v = y + return x + u + v, (x + u, x + v) + + # we're testing args and kwargs here, too + @argmap(xform, (0, ("t", 2))) + def foo(a, *args, **kwargs): + return a, args, kwargs + + a, args, kwargs = foo(1, 2, 3, t=4) + + assert a == 1 + 4 + 3 + assert args == (2, 1 + 3) + assert kwargs == {"t": 1 + 4} + + def test_flatten(self): + assert tuple(argmap._flatten([[[[[], []], [], []], [], [], []]], set())) == () + + rlist = ["a", ["b", "c"], [["d"], "e"], "f"] + assert "".join(argmap._flatten(rlist, set())) == "abcdef" + + def test_indent(self): + code = "\n".join( + argmap._indent( + *[ + "try:", + "try:", + "pass#", + "finally:", + "pass#", + "#", + "finally:", + "pass#", + ] + ) + ) + assert ( + code + == """try: + try: + pass# + finally: + pass# + # +finally: + pass#""" + ) + + def test_immediate_raise(self): + @not_implemented_for("directed") + def yield_nodes(G): + yield from G + + G = nx.Graph([(1, 2)]) + D = nx.DiGraph() + + # test first call (argmap is compiled and executed) + with pytest.raises(nx.NetworkXNotImplemented): + node_iter = yield_nodes(D) + + # test second call (argmap is only executed) + with pytest.raises(nx.NetworkXNotImplemented): + node_iter = yield_nodes(D) + + # ensure that generators still make generators + node_iter = yield_nodes(G) + next(node_iter) + next(node_iter) + with pytest.raises(StopIteration): + next(node_iter) diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_heaps.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_heaps.py new file mode 100644 index 0000000000000000000000000000000000000000..5ea3871638688ed466b72bf3c99c977913a503dc --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_heaps.py @@ -0,0 +1,131 @@ +import pytest + +import networkx as nx +from networkx.utils import BinaryHeap, PairingHeap + + +class X: + def __eq__(self, other): + raise self is other + + def __ne__(self, other): + raise self is not other + + def __lt__(self, other): + raise TypeError("cannot compare") + + def __le__(self, other): + raise TypeError("cannot compare") + + def __ge__(self, other): + raise TypeError("cannot compare") + + def __gt__(self, other): + raise TypeError("cannot compare") + + def __hash__(self): + return hash(id(self)) + + +x = X() + + +data = [ # min should not invent an element. + ("min", nx.NetworkXError), + # Popping an empty heap should fail. + ("pop", nx.NetworkXError), + # Getting nonexisting elements should return None. + ("get", 0, None), + ("get", x, None), + ("get", None, None), + # Inserting a new key should succeed. + ("insert", x, 1, True), + ("get", x, 1), + ("min", (x, 1)), + # min should not pop the top element. + ("min", (x, 1)), + # Inserting a new key of different type should succeed. + ("insert", 1, -2.0, True), + # int and float values should interop. 
+ ("min", (1, -2.0)), + # pop removes minimum-valued element. + ("insert", 3, -(10**100), True), + ("insert", 4, 5, True), + ("pop", (3, -(10**100))), + ("pop", (1, -2.0)), + # Decrease-insert should succeed. + ("insert", 4, -50, True), + ("insert", 4, -60, False, True), + # Decrease-insert should not create duplicate keys. + ("pop", (4, -60)), + ("pop", (x, 1)), + # Popping all elements should empty the heap. + ("min", nx.NetworkXError), + ("pop", nx.NetworkXError), + # Non-value-changing insert should fail. + ("insert", x, 0, True), + ("insert", x, 0, False, False), + ("min", (x, 0)), + ("insert", x, 0, True, False), + ("min", (x, 0)), + # Failed insert should not create duplicate keys. + ("pop", (x, 0)), + ("pop", nx.NetworkXError), + # Increase-insert should succeed when allowed. + ("insert", None, 0, True), + ("insert", 2, -1, True), + ("min", (2, -1)), + ("insert", 2, 1, True, False), + ("min", (None, 0)), + # Increase-insert should fail when disallowed. + ("insert", None, 2, False, False), + ("min", (None, 0)), + # Failed increase-insert should not create duplicate keys. + ("pop", (None, 0)), + ("pop", (2, 1)), + ("min", nx.NetworkXError), + ("pop", nx.NetworkXError), +] + + +def _test_heap_class(cls, *args, **kwargs): + heap = cls(*args, **kwargs) + # Basic behavioral test + for op in data: + if op[-1] is not nx.NetworkXError: + assert op[-1] == getattr(heap, op[0])(*op[1:-1]) + else: + pytest.raises(op[-1], getattr(heap, op[0]), *op[1:-1]) + # Coverage test. + for i in range(99, -1, -1): + assert heap.insert(i, i) + for i in range(50): + assert heap.pop() == (i, i) + for i in range(100): + assert heap.insert(i, i) == (i < 50) + for i in range(100): + assert not heap.insert(i, i + 1) + for i in range(50): + assert heap.pop() == (i, i) + for i in range(100): + assert heap.insert(i, i + 1) == (i < 50) + for i in range(49): + assert heap.pop() == (i, i + 1) + assert sorted([heap.pop(), heap.pop()]) == [(49, 50), (50, 50)] + for i in range(51, 100): + assert not heap.insert(i, i + 1, True) + for i in range(51, 70): + assert heap.pop() == (i, i + 1) + for i in range(100): + assert heap.insert(i, i) + for i in range(100): + assert heap.pop() == (i, i) + pytest.raises(nx.NetworkXError, heap.pop) + + +def test_PairingHeap(): + _test_heap_class(PairingHeap) + + +def test_BinaryHeap(): + _test_heap_class(BinaryHeap) diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_mapped_queue.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_mapped_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..ca9b7e42072f5aebbf4b794302d06f21f5d8e17c --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_mapped_queue.py @@ -0,0 +1,268 @@ +import pytest + +from networkx.utils.mapped_queue import MappedQueue, _HeapElement + + +def test_HeapElement_gtlt(): + bar = _HeapElement(1.1, "a") + foo = _HeapElement(1, "b") + assert foo < bar + assert bar > foo + assert foo < 1.1 + assert 1 < bar + + +def test_HeapElement_gtlt_tied_priority(): + bar = _HeapElement(1, "a") + foo = _HeapElement(1, "b") + assert foo > bar + assert bar < foo + + +def test_HeapElement_eq(): + bar = _HeapElement(1.1, "a") + foo = _HeapElement(1, "a") + assert foo == bar + assert bar == foo + assert foo == "a" + + +def test_HeapElement_iter(): + foo = _HeapElement(1, "a") + bar = _HeapElement(1.1, (3, 2, 1)) + assert list(foo) == [1, "a"] + assert list(bar) == [1.1, 3, 2, 1] + + +def test_HeapElement_getitem(): + foo = _HeapElement(1, "a") + bar = _HeapElement(1.1, (3, 2, 1)) + assert 
foo[1] == "a" + assert foo[0] == 1 + assert bar[0] == 1.1 + assert bar[2] == 2 + assert bar[3] == 1 + pytest.raises(IndexError, bar.__getitem__, 4) + pytest.raises(IndexError, foo.__getitem__, 2) + + +class TestMappedQueue: + def setup_method(self): + pass + + def _check_map(self, q): + assert q.position == {elt: pos for pos, elt in enumerate(q.heap)} + + def _make_mapped_queue(self, h): + q = MappedQueue() + q.heap = h + q.position = {elt: pos for pos, elt in enumerate(h)} + return q + + def test_heapify(self): + h = [5, 4, 3, 2, 1, 0] + q = self._make_mapped_queue(h) + q._heapify() + self._check_map(q) + + def test_init(self): + h = [5, 4, 3, 2, 1, 0] + q = MappedQueue(h) + self._check_map(q) + + def test_incomparable(self): + h = [5, 4, "a", 2, 1, 0] + pytest.raises(TypeError, MappedQueue, h) + + def test_len(self): + h = [5, 4, 3, 2, 1, 0] + q = MappedQueue(h) + self._check_map(q) + assert len(q) == 6 + + def test_siftup_leaf(self): + h = [2] + h_sifted = [2] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_one_child(self): + h = [2, 0] + h_sifted = [0, 2] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_left_child(self): + h = [2, 0, 1] + h_sifted = [0, 2, 1] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_right_child(self): + h = [2, 1, 0] + h_sifted = [0, 1, 2] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftup_multiple(self): + h = [0, 1, 2, 4, 3, 5, 6] + h_sifted = [0, 1, 2, 4, 3, 5, 6] + q = self._make_mapped_queue(h) + q._siftup(0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftdown_leaf(self): + h = [2] + h_sifted = [2] + q = self._make_mapped_queue(h) + q._siftdown(0, 0) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftdown_single(self): + h = [1, 0] + h_sifted = [0, 1] + q = self._make_mapped_queue(h) + q._siftdown(0, len(h) - 1) + assert q.heap == h_sifted + self._check_map(q) + + def test_siftdown_multiple(self): + h = [1, 2, 3, 4, 5, 6, 7, 0] + h_sifted = [0, 1, 3, 2, 5, 6, 7, 4] + q = self._make_mapped_queue(h) + q._siftdown(0, len(h) - 1) + assert q.heap == h_sifted + self._check_map(q) + + def test_push(self): + to_push = [6, 1, 4, 3, 2, 5, 0] + h_sifted = [0, 2, 1, 6, 3, 5, 4] + q = MappedQueue() + for elt in to_push: + q.push(elt) + assert q.heap == h_sifted + self._check_map(q) + + def test_push_duplicate(self): + to_push = [2, 1, 0] + h_sifted = [0, 2, 1] + q = MappedQueue() + for elt in to_push: + inserted = q.push(elt) + assert inserted + assert q.heap == h_sifted + self._check_map(q) + inserted = q.push(1) + assert not inserted + + def test_pop(self): + h = [3, 4, 6, 0, 1, 2, 5] + h_sorted = sorted(h) + q = self._make_mapped_queue(h) + q._heapify() + popped = [q.pop() for _ in range(len(h))] + assert popped == h_sorted + self._check_map(q) + + def test_remove_leaf(self): + h = [0, 2, 1, 6, 3, 5, 4] + h_removed = [0, 2, 1, 6, 4, 5] + q = self._make_mapped_queue(h) + removed = q.remove(3) + assert q.heap == h_removed + + def test_remove_root(self): + h = [0, 2, 1, 6, 3, 5, 4] + h_removed = [1, 2, 4, 6, 3, 5] + q = self._make_mapped_queue(h) + removed = q.remove(0) + assert q.heap == h_removed + + def test_update_leaf(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [0, 15, 10, 60, 20, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(30, 15) + 
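# 30 sits at index 4; update() swaps in 15, which sifts up past its parent 20 +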
assert q.heap == h_updated + + def test_update_root(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [10, 20, 35, 60, 30, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(0, 35) + assert q.heap == h_updated + + +class TestMappedDict(TestMappedQueue): + def _make_mapped_queue(self, h): + priority_dict = {elt: elt for elt in h} + return MappedQueue(priority_dict) + + def test_init(self): + d = {5: 0, 4: 1, "a": 2, 2: 3, 1: 4} + q = MappedQueue(d) + assert q.position == d + + def test_ties(self): + d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4} + q = MappedQueue(d) + assert q.position == {elt: pos for pos, elt in enumerate(q.heap)} + + def test_pop(self): + d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4} + q = MappedQueue(d) + assert q.pop() == _HeapElement(0, 5) + assert q.position == {elt: pos for pos, elt in enumerate(q.heap)} + + def test_empty_pop(self): + q = MappedQueue() + pytest.raises(IndexError, q.pop) + + def test_incomparable_ties(self): + d = {5: 0, 4: 0, "a": 0, 2: 0, 1: 0} + pytest.raises(TypeError, MappedQueue, d) + + def test_push(self): + to_push = [6, 1, 4, 3, 2, 5, 0] + h_sifted = [0, 2, 1, 6, 3, 5, 4] + q = MappedQueue() + for elt in to_push: + q.push(elt, priority=elt) + assert q.heap == h_sifted + self._check_map(q) + + def test_push_duplicate(self): + to_push = [2, 1, 0] + h_sifted = [0, 2, 1] + q = MappedQueue() + for elt in to_push: + inserted = q.push(elt, priority=elt) + assert inserted + assert q.heap == h_sifted + self._check_map(q) + inserted = q.push(1, priority=1) + assert not inserted + + def test_update_leaf(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [0, 15, 10, 60, 20, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(30, 15, priority=15) + assert q.heap == h_updated + + def test_update_root(self): + h = [0, 20, 10, 60, 30, 50, 40] + h_updated = [10, 20, 35, 60, 30, 50, 40] + q = self._make_mapped_queue(h) + removed = q.update(0, 35, priority=35) + assert q.heap == h_updated diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_misc.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..18d2878f635f3e7928d83c6acf4bd52cc27c7642 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_misc.py @@ -0,0 +1,255 @@ +import random +from copy import copy + +import pytest + +import networkx as nx +from networkx.utils import ( + PythonRandomInterface, + arbitrary_element, + create_py_random_state, + create_random_state, + dict_to_numpy_array, + discrete_sequence, + flatten, + groups, + make_list_of_ints, + pairwise, + powerlaw_sequence, +) +from networkx.utils.misc import _dict_to_numpy_array1, _dict_to_numpy_array2 + +nested_depth = ( + 1, + 2, + (3, 4, ((5, 6, (7,), (8, (9, 10), 11), (12, 13, (14, 15)), 16), 17), 18, 19), + 20, +) + +nested_set = { + (1, 2, 3, 4), + (5, 6, 7, 8, 9), + (10, 11, (12, 13, 14), (15, 16, 17, 18)), + 19, + 20, +} + +nested_mixed = [ + 1, + (2, 3, {4, (5, 6), 7}, [8, 9]), + {10: "foo", 11: "bar", (12, 13): "baz"}, + {(14, 15): "qwe", 16: "asd"}, + (17, (18, "19"), 20), +] + + +@pytest.mark.parametrize("result", [None, [], ["existing"], ["existing1", "existing2"]]) +@pytest.mark.parametrize("nested", [nested_depth, nested_mixed, nested_set]) +def test_flatten(nested, result): + if result is None: + val = flatten(nested, result) + assert len(val) == 20 + else: + _result = copy(result) # because pytest passes parameters as is + nexisting = len(_result) + val = flatten(nested, _result) + assert len(val) == len(_result) == 
20 + nexisting + + assert issubclass(type(val), tuple) + + +def test_make_list_of_ints(): + mylist = [1, 2, 3.0, 42, -2] + assert make_list_of_ints(mylist) is mylist + assert make_list_of_ints(mylist) == mylist + assert type(make_list_of_ints(mylist)[2]) is int + pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3, "kermit"]) + pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3.1]) + + +def test_random_number_distribution(): + # smoke test only + z = powerlaw_sequence(20, exponent=2.5) + z = discrete_sequence(20, distribution=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3]) + + +class TestNumpyArray: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_numpy_to_list_of_ints(self): + a = np.array([1, 2, 3], dtype=np.int64) + b = np.array([1.0, 2, 3]) + c = np.array([1.1, 2, 3]) + assert type(make_list_of_ints(a)) == list + assert make_list_of_ints(b) == list(b) + B = make_list_of_ints(b) + assert type(B[0]) == int + pytest.raises(nx.NetworkXError, make_list_of_ints, c) + + def test__dict_to_numpy_array1(self): + d = {"a": 1, "b": 2} + a = _dict_to_numpy_array1(d, mapping={"a": 0, "b": 1}) + np.testing.assert_allclose(a, np.array([1, 2])) + a = _dict_to_numpy_array1(d, mapping={"b": 0, "a": 1}) + np.testing.assert_allclose(a, np.array([2, 1])) + + a = _dict_to_numpy_array1(d) + np.testing.assert_allclose(a.sum(), 3) + + def test__dict_to_numpy_array2(self): + d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}} + + mapping = {"a": 1, "b": 0} + a = _dict_to_numpy_array2(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]])) + + a = _dict_to_numpy_array2(d) + np.testing.assert_allclose(a.sum(), 33) + + def test_dict_to_numpy_array_a(self): + d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}} + + mapping = {"a": 0, "b": 1} + a = dict_to_numpy_array(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([[1, 2], [10, 20]])) + + mapping = {"a": 1, "b": 0} + a = dict_to_numpy_array(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]])) + + a = _dict_to_numpy_array2(d) + np.testing.assert_allclose(a.sum(), 33) + + def test_dict_to_numpy_array_b(self): + d = {"a": 1, "b": 2} + + mapping = {"a": 0, "b": 1} + a = dict_to_numpy_array(d, mapping=mapping) + np.testing.assert_allclose(a, np.array([1, 2])) + + a = _dict_to_numpy_array1(d) + np.testing.assert_allclose(a.sum(), 3) + + +def test_pairwise(): + nodes = range(4) + node_pairs = [(0, 1), (1, 2), (2, 3)] + node_pairs_cycle = node_pairs + [(3, 0)] + assert list(pairwise(nodes)) == node_pairs + assert list(pairwise(iter(nodes))) == node_pairs + assert list(pairwise(nodes, cyclic=True)) == node_pairs_cycle + empty_iter = iter(()) + assert list(pairwise(empty_iter)) == [] + empty_iter = iter(()) + assert list(pairwise(empty_iter, cyclic=True)) == [] + + +def test_groups(): + many_to_one = dict(zip("abcde", [0, 0, 1, 1, 2])) + actual = groups(many_to_one) + expected = {0: {"a", "b"}, 1: {"c", "d"}, 2: {"e"}} + assert actual == expected + assert {} == groups({}) + + +def test_create_random_state(): + np = pytest.importorskip("numpy") + rs = np.random.RandomState + + assert isinstance(create_random_state(1), rs) + assert isinstance(create_random_state(None), rs) + assert isinstance(create_random_state(np.random), rs) + assert isinstance(create_random_state(rs(1)), rs) + # Support for numpy.random.Generator + rng = np.random.default_rng() + assert isinstance(create_random_state(rng), np.random.Generator) + pytest.raises(ValueError, 
create_random_state, "a") + + assert np.all(rs(1).rand(10) == create_random_state(1).rand(10)) + + +def test_create_py_random_state(): + pyrs = random.Random + + assert isinstance(create_py_random_state(1), pyrs) + assert isinstance(create_py_random_state(None), pyrs) + assert isinstance(create_py_random_state(pyrs(1)), pyrs) + pytest.raises(ValueError, create_py_random_state, "a") + + np = pytest.importorskip("numpy") + + rs = np.random.RandomState + rng = np.random.default_rng(1000) + rng_explicit = np.random.Generator(np.random.SFC64()) + nprs = PythonRandomInterface + assert isinstance(create_py_random_state(np.random), nprs) + assert isinstance(create_py_random_state(rs(1)), nprs) + assert isinstance(create_py_random_state(rng), nprs) + assert isinstance(create_py_random_state(rng_explicit), nprs) + # test default rng input + assert isinstance(PythonRandomInterface(), nprs) + + +def test_PythonRandomInterface_RandomState(): + np = pytest.importorskip("numpy") + + rs = np.random.RandomState + rng = PythonRandomInterface(rs(42)) + rs42 = rs(42) + + # make sure these functions are same as expected outcome + assert rng.randrange(3, 5) == rs42.randint(3, 5) + assert rng.choice([1, 2, 3]) == rs42.choice([1, 2, 3]) + assert rng.gauss(0, 1) == rs42.normal(0, 1) + assert rng.expovariate(1.5) == rs42.exponential(1 / 1.5) + assert np.all(rng.shuffle([1, 2, 3]) == rs42.shuffle([1, 2, 3])) + assert np.all( + rng.sample([1, 2, 3], 2) == rs42.choice([1, 2, 3], (2,), replace=False) + ) + assert np.all( + [rng.randint(3, 5) for _ in range(100)] + == [rs42.randint(3, 6) for _ in range(100)] + ) + assert rng.random() == rs42.random_sample() + + +def test_PythonRandomInterface_Generator(): + np = pytest.importorskip("numpy") + + rng = np.random.default_rng(42) + pri = PythonRandomInterface(np.random.default_rng(42)) + + # make sure these functions are same as expected outcome + assert pri.randrange(3, 5) == rng.integers(3, 5) + assert pri.choice([1, 2, 3]) == rng.choice([1, 2, 3]) + assert pri.gauss(0, 1) == rng.normal(0, 1) + assert pri.expovariate(1.5) == rng.exponential(1 / 1.5) + assert np.all(pri.shuffle([1, 2, 3]) == rng.shuffle([1, 2, 3])) + assert np.all( + pri.sample([1, 2, 3], 2) == rng.choice([1, 2, 3], (2,), replace=False) + ) + assert np.all( + [pri.randint(3, 5) for _ in range(100)] + == [rng.integers(3, 6) for _ in range(100)] + ) + assert pri.random() == rng.random() + + +@pytest.mark.parametrize( + ("iterable_type", "expected"), ((list, 1), (tuple, 1), (str, "["), (set, 1)) +) +def test_arbitrary_element(iterable_type, expected): + iterable = iterable_type([1, 2, 3]) + assert arbitrary_element(iterable) == expected + + +@pytest.mark.parametrize( + "iterator", ((i for i in range(3)), iter([1, 2, 3])) # generator +) +def test_arbitrary_element_raises(iterator): + """Value error is raised when input is an iterator.""" + with pytest.raises(ValueError, match="from an iterator"): + arbitrary_element(iterator) diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_random_sequence.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_random_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..1d1b95799f75aa668801a5af482bee36fdb8d837 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_random_sequence.py @@ -0,0 +1,38 @@ +import pytest + +from networkx.utils import ( + powerlaw_sequence, + random_weighted_sample, + weighted_choice, + zipf_rv, +) + + +def test_degree_sequences(): + seq = powerlaw_sequence(10, seed=1) + seq = 
powerlaw_sequence(10) + assert len(seq) == 10 + + +def test_zipf_rv(): + r = zipf_rv(2.3, xmin=2, seed=1) + r = zipf_rv(2.3, 2, 1) + r = zipf_rv(2.3) + assert isinstance(r, int) + pytest.raises(ValueError, zipf_rv, 0.5) + pytest.raises(ValueError, zipf_rv, 2, xmin=0) + + +def test_random_weighted_sample(): + mapping = {"a": 10, "b": 20} + s = random_weighted_sample(mapping, 2, seed=1) + s = random_weighted_sample(mapping, 2) + assert sorted(s) == sorted(mapping.keys()) + pytest.raises(ValueError, random_weighted_sample, mapping, 3) + + +def test_random_weighted_choice(): + mapping = {"a": 10, "b": 0} + c = weighted_choice(mapping, seed=1) + c = weighted_choice(mapping) + assert c == "a" diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_rcm.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_rcm.py new file mode 100644 index 0000000000000000000000000000000000000000..88702b3635dfa173f27eb283bc769d0930918e62 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_rcm.py @@ -0,0 +1,63 @@ +import networkx as nx +from networkx.utils import reverse_cuthill_mckee_ordering + + +def test_reverse_cuthill_mckee(): + # example graph from + # http://www.boost.org/doc/libs/1_37_0/libs/graph/example/cuthill_mckee_ordering.cpp + G = nx.Graph( + [ + (0, 3), + (0, 5), + (1, 2), + (1, 4), + (1, 6), + (1, 9), + (2, 3), + (2, 4), + (3, 5), + (3, 8), + (4, 6), + (5, 6), + (5, 7), + (6, 7), + ] + ) + rcm = list(reverse_cuthill_mckee_ordering(G)) + assert rcm in [[0, 8, 5, 7, 3, 6, 2, 4, 1, 9], [0, 8, 5, 7, 3, 6, 4, 2, 1, 9]] + + +def test_rcm_alternate_heuristic(): + # example from + G = nx.Graph( + [ + (0, 0), + (0, 4), + (1, 1), + (1, 2), + (1, 5), + (1, 7), + (2, 2), + (2, 4), + (3, 3), + (3, 6), + (4, 4), + (5, 5), + (5, 7), + (6, 6), + (7, 7), + ] + ) + + answers = [ + [6, 3, 5, 7, 1, 2, 4, 0], + [6, 3, 7, 5, 1, 2, 4, 0], + [7, 5, 1, 2, 4, 0, 6, 3], + ] + + def smallest_degree(G): + deg, node = min((d, n) for n, d in G.degree()) + return node + + rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree)) + assert rcm in answers diff --git a/phivenv/Lib/site-packages/networkx/utils/tests/test_unionfind.py b/phivenv/Lib/site-packages/networkx/utils/tests/test_unionfind.py new file mode 100644 index 0000000000000000000000000000000000000000..2d30580fc942e3715f2a6a25125bad9f9e1e74b6 --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/tests/test_unionfind.py @@ -0,0 +1,55 @@ +import networkx as nx + + +def test_unionfind(): + # Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8 + # Previously (in 2.x), the UnionFind class could handle mixed types. + # But in Python 3.x, this causes a TypeError such as: + # TypeError: unorderable types: str() > int() + # + # Now we just make sure that no exception is raised. + x = nx.utils.UnionFind() + x.union(0, "a") + + +def test_subtree_union(): + # See https://github.com/networkx/networkx/pull/3224 + # (35db1b551ee65780794a357794f521d8768d5049). + # Test that subtree unions are handled correctly by to_sets().
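+ # union(1, 5) must merge the {1, 2} tree into the {3, 4, 5} tree before to_sets() is read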
+ uf = nx.utils.UnionFind() + uf.union(1, 2) + uf.union(3, 4) + uf.union(4, 5) + uf.union(1, 5) + assert list(uf.to_sets()) == [{1, 2, 3, 4, 5}] + + +def test_unionfind_weights(): + # Tests if weights are computed correctly with unions of many elements + uf = nx.utils.UnionFind() + uf.union(1, 4, 7) + uf.union(2, 5, 8) + uf.union(3, 6, 9) + uf.union(1, 2, 3, 4, 5, 6, 7, 8, 9) + assert uf.weights[uf[1]] == 9 + + +def test_unbalanced_merge_weights(): + # Tests if the largest set's root is used as the new root when merging + uf = nx.utils.UnionFind() + uf.union(1, 2, 3) + uf.union(4, 5, 6, 7, 8, 9) + assert uf.weights[uf[1]] == 3 + assert uf.weights[uf[4]] == 6 + largest_root = uf[4] + uf.union(1, 4) + assert uf[1] == largest_root + assert uf.weights[largest_root] == 9 + + +def test_empty_union(): + # Tests if a null-union does nothing. + uf = nx.utils.UnionFind((0, 1)) + uf.union() + assert uf[0] == 0 + assert uf[1] == 1 diff --git a/phivenv/Lib/site-packages/networkx/utils/union_find.py b/phivenv/Lib/site-packages/networkx/utils/union_find.py new file mode 100644 index 0000000000000000000000000000000000000000..2a07129f5427cd8a3caf30095efee125bc3d853b --- /dev/null +++ b/phivenv/Lib/site-packages/networkx/utils/union_find.py @@ -0,0 +1,106 @@ +""" +Union-find data structure. +""" + +from networkx.utils import groups + + +class UnionFind: + """Union-find data structure. + + Each unionFind instance X maintains a family of disjoint sets of + hashable objects, supporting the following two methods: + + - X[item] returns a name for the set containing the given item. + Each set is named by an arbitrarily-chosen one of its members; as + long as the set remains unchanged it will keep the same name. If + the item is not yet part of a set in X, a new singleton set is + created for it. + + - X.union(item1, item2, ...) merges the sets containing each item + into a single larger set. If any item is not yet part of a set + in X, it is added to X as one of the members of the merged set. + + Union-find data structure. Based on Josiah Carlson's code, + https://code.activestate.com/recipes/215912/ + with significant additional changes by D. Eppstein. + http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py + + """ + + def __init__(self, elements=None): + """Create a new empty union-find structure. + + If *elements* is an iterable, this structure will be initialized + with the discrete partition on the given set of elements. + + """ + if elements is None: + elements = () + self.parents = {} + self.weights = {} + for x in elements: + self.weights[x] = 1 + self.parents[x] = x + + def __getitem__(self, object): + """Find and return the name of the set containing the object.""" + + # check for previously unknown object + if object not in self.parents: + self.parents[object] = object + self.weights[object] = 1 + return object + + # find path of objects leading to the root + path = [] + root = self.parents[object] + while root != object: + path.append(object) + object = root + root = self.parents[object] + + # compress the path and return + for ancestor in path: + self.parents[ancestor] = root + return root + + def __iter__(self): + """Iterate through all items ever found or unioned by this structure.""" + return iter(self.parents) + + def to_sets(self): + """Iterates over the sets stored in this structure. 
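+ Each block of the partition is yielded once, as a set of its members.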
+ + For example:: + + >>> partition = UnionFind("xyz") + >>> sorted(map(sorted, partition.to_sets())) + [['x'], ['y'], ['z']] + >>> partition.union("x", "y") + >>> sorted(map(sorted, partition.to_sets())) + [['x', 'y'], ['z']] + + """ + # Ensure fully pruned paths + for x in self.parents: + _ = self[x] # Evaluated for side-effect only + + yield from groups(self.parents).values() + + def union(self, *objects): + """Find the sets containing the objects and merge them all.""" + # Find the heaviest root according to its weight. + roots = iter( + sorted( + {self[x] for x in objects}, key=lambda r: self.weights[r], reverse=True + ) + ) + try: + root = next(roots) + except StopIteration: + return + + for r in roots: + self.weights[root] += self.weights[r] + self.parents[r] = root diff --git a/phivenv/Lib/site-packages/numpy/__config__.py b/phivenv/Lib/site-packages/numpy/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..5426a35d218b396ff969c425c5e9b520e30d12d4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/__config__.py @@ -0,0 +1,162 @@ +# This file is generated by numpy's build process +# It contains system_info results at the time of building this package. +from enum import Enum +from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) + +__all__ = ["show"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)} + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "msvc", + "linker": r"link", + "version": "19.29.30154", + "commands": r"cl", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": "cython", + "linker": r"cython", + "version": "3.0.11", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "msvc", + "linker": r"link", + "version": "19.29.30154", + "commands": r"cl", + "args": r"", + "linker args": r"", + }, + }, + "Machine Information": { + "host": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "windows", + }, + "build": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "windows", + }, + "cross-compiled": bool("False".lower().replace("false", "")), + }, + "Build Dependencies": { + "blas": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.27", + "detection method": "pkgconfig", + "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-vclkdodh/cp39-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", + "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-vclkdodh/cp39-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.27 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Zen MAX_THREADS=24", + "pc file directory": r"D:/a/numpy/numpy/.openblas", + }, + "lapack": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.27", + "detection method": "pkgconfig", + "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-vclkdodh/cp39-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", + "lib directory": 
r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-vclkdodh/cp39-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.27 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Zen MAX_THREADS=24", + "pc file directory": r"D:/a/numpy/numpy/.openblas", + }, + }, + "Python Information": { + "path": r"C:\Users\runneradmin\AppData\Local\Temp\build-env-le8kd45u\Scripts\python.exe", + "version": "3.9", + }, + "SIMD Extensions": { + "baseline": __cpu_baseline__, + "found": [ + feature for feature in __cpu_dispatch__ if __cpu_features__[feature] + ], + "not found": [ + feature for feature in __cpu_dispatch__ if not __cpu_features__[feature] + ], + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which NumPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) diff --git a/phivenv/Lib/site-packages/numpy/__init__.cython-30.pxd b/phivenv/Lib/site-packages/numpy/__init__.cython-30.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1bb12cdd6af87b33c2d7d43bdc6abe7a88e9c4c6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1226 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) 
+ ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! 
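+ # (use the NPY_ARRAY_* flags from the next enum block instead)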
+ NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef int type_num + + @property + cdef inline npy_intp itemsize(self) noexcept nogil: + return PyDataType_ELSIZE(self) + + @property + cdef inline npy_intp alignment(self) noexcept nogil: + return PyDataType_ALIGNMENT(self) + + # Use fields/names with care as they may be NULL. You must check + # for this using PyDataType_HASFIELDS. + @property + cdef inline object fields(self): + return PyDataType_FIELDS(self) + + @property + cdef inline tuple names(self): + return PyDataType_NAMES(self) + + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. 
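+ # (PyDataType_SHAPE, defined further down in this file, returns () when there is no subarray, so callers can skip the NULL check)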
+ @property + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: + return PyDataType_SUBARRAY(self) + + @property + cdef inline npy_uint64 flags(self) noexcept nogil: + """The data types flags.""" + return PyDataType_FLAGS(self) + + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + + @property + cdef inline int numiter(self) noexcept nogil: + """The number of arrays that need to be broadcast to the same shape.""" + return PyArray_MultiIter_NUMITER(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """The total broadcasted size.""" + return PyArray_MultiIter_SIZE(self) + + @property + cdef inline npy_intp index(self) noexcept nogil: + """The current (1-d) index into the broadcasted result.""" + return PyArray_MultiIter_INDEX(self) + + @property + cdef inline int nd(self) noexcept nogil: + """The number of dimensions in the broadcasted result.""" + return PyArray_MultiIter_NDIM(self) + + @property + cdef inline npy_intp* dimensions(self) noexcept nogil: + """The shape of the broadcasted result.""" + return PyArray_MultiIter_DIMS(self) + + @property + cdef inline void** iters(self) noexcept nogil: + """An array of iterator objects that holds the iterators for the arrays to be broadcast together. + On return, the iterators are adjusted for broadcasting.""" + return PyArray_MultiIter_ITERS(self) + + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) noexcept nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) noexcept nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) noexcept nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) noexcept nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) noexcept nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. 
+ # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # 
works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter 
(object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
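+# (which of these exist depends on the platform's C types, e.g. the size of long double)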
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING 
casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). + int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base) except *: + Py_INCREF(base) # important to do this before stealing the reference below! 
+ PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (<PyDatetimeScalarObject*>obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (<PyTimedeltaScalarObject*>obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object.
+ """ + return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base + + +# Iterator API added in v1.6 +ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil +ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it,
npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) diff --git a/phivenv/Lib/site-packages/numpy/__init__.pxd b/phivenv/Lib/site-packages/numpy/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..86e30e5ab01f3b1035918b6cf570d64a64ec0adf --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/__init__.pxd @@ -0,0 +1,1141 @@ +# NumPy static imports for Cython < 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. 
+ # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! 
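+    # --- Editor's illustrative note (not part of the upstream NumPy sources) --- +    # A hedged sketch of how these typenum constants pair with the array +    # creation routines declared later in this file, assuming import_array() +    # has already been called (as the file header requires): +    # +    #     cdef npy_intp dims[1] +    #     dims[0] = 3 +    #     arr = PyArray_SimpleNew(1, dims, NPY_FLOAT64)  # new 1-D float64 array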
+ + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. + # cdef char flags + cdef int type_num + # itemsize/elsize, alignment, fields, names, and subarray must + # use the `PyDataType_*` accessor macros. With Cython 3 you can + # still use getter attributes `dtype.itemsize` + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. 
+ pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base # NOT PUBLIC, DO NOT USE ! + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint 
PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object 
values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType 
(int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? -1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) 
+ #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING 
casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). + int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! 
+ PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (<PyDatetimeScalarObject*>obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (<PyTimedeltaScalarObject*>obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object.
+ """ + return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base + + +# Iterator API added in v1.6 +ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil +ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it,
npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) diff --git a/phivenv/Lib/site-packages/numpy/__init__.py b/phivenv/Lib/site-packages/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa980f952e06fe4d66696a4b1010b512267b7566 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/__init__.py @@ -0,0 +1,568 @@ +""" +NumPy +===== + +Provides + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation + +How to use the documentation +---------------------------- +Documentation is available in two forms: docstrings provided +with the code, and a loose standing reference guide, available from +`the NumPy homepage <https://numpy.org>`_. + +We recommend exploring the docstrings using +`IPython <https://ipython.org>`_, an advanced Python shell with +TAB-completion and introspection capabilities. See below for further +instructions. + +The docstring examples assume that `numpy` has been imported as ``np``:: + + >>> import numpy as np + +Code snippets are indicated by three greater-than signs:: + + >>> x = 42 + >>> x = x + 1 + +Use the built-in ``help`` function to view a function's docstring:: + + >>> help(np.sort) + ...
# doctest: +SKIP + +For some objects, ``np.info(obj)`` may provide additional help. This is +particularly true if you see the line "Help on ufunc object:" at the top +of the help() page. Ufuncs are implemented in C, not Python, for speed. +The native Python help() does not know how to view their help, but our +np.info() function does. + +Available subpackages +--------------------- +lib + Basic functions used by several sub-packages. +random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +polynomial + Polynomial tools +testing + NumPy testing tools +distutils + Enhancements to distutils with support for + Fortran compilers support and more (for Python <= 3.11) + +Utilities +--------- +test + Run numpy unittests +show_config + Show numpy build configuration +__version__ + NumPy version string + +Viewing documentation using IPython +----------------------------------- + +Start IPython and import `numpy` usually under the alias ``np``: `import +numpy as np`. Then, directly paste or use the ``%cpaste`` magic to paste +examples into the shell. To see which functions are available in `numpy`, +type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use +``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow +down the list. To view the docstring for a function, use +``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view +the source code). + +Copies vs. in-place operation +----------------------------- +Most of the functions in `numpy` return a copy of the array argument +(e.g., `np.sort`). In-place versions of these functions are often +available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. +Exceptions to this rule are documented. + +""" + + +# start delvewheel patch +def _delvewheel_patch_1_8_0(): + import ctypes + import os + import platform + import sys + libs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'numpy.libs')) + is_conda_cpython = platform.python_implementation() == 'CPython' and (hasattr(ctypes.pythonapi, 'Anaconda_GetVersion') or 'packaged by conda-forge' in sys.version) + if sys.version_info[:2] >= (3, 8) and not is_conda_cpython or sys.version_info[:2] >= (3, 10): + if os.path.isdir(libs_dir): + os.add_dll_directory(libs_dir) + else: + load_order_filepath = os.path.join(libs_dir, '.load-order-numpy-2.0.2') + if os.path.isfile(load_order_filepath): + with open(os.path.join(libs_dir, '.load-order-numpy-2.0.2')) as file: + load_order = file.read().split() + kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) + kernel32.LoadLibraryExW.restype = ctypes.c_void_p + for lib in load_order: + lib_path = os.path.join(os.path.join(libs_dir, lib)) + if os.path.isfile(lib_path) and not kernel32.LoadLibraryExW(ctypes.c_wchar_p(lib_path), None, 8): + raise OSError('Error loading {}; {}'.format(lib, ctypes.FormatError(ctypes.get_last_error()))) + + +_delvewheel_patch_1_8_0() +del _delvewheel_patch_1_8_0 +# end delvewheel patch + +import os +import sys +import warnings + +from ._globals import _NoValue, _CopyMode +from ._expired_attrs_2_0 import __expired_attributes__ + + +# If a version with git hash was stored, use that instead +from . import version +from .version import __version__ + +# We first need to detect if we're being called as part of the numpy setup +# procedure itself in a reliable manner.
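+# (Editor's note, an assumption based on historical NumPy build scripts: the +# build sets ``__NUMPY_SETUP__ = True`` as a builtin before importing this +# module, so a plain ``import numpy`` hits the NameError branch below and +# proceeds with the normal package initialisation.)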
+try: + __NUMPY_SETUP__ +except NameError: + __NUMPY_SETUP__ = False + +if __NUMPY_SETUP__: + sys.stderr.write('Running from numpy source directory.\n') +else: + # Allow distributors to run custom init code before importing numpy._core + from . import _distributor_init + + try: + from numpy.__config__ import show as show_config + except ImportError as e: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + + from . import _core + from ._core import ( + False_, ScalarType, True_, _get_promotion_state, _no_nep50_warning, + _set_promotion_state, abs, absolute, acos, acosh, add, all, allclose, + amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, + arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, + argwhere, around, array, array2string, array_equal, array_equiv, + array_repr, array_str, asanyarray, asarray, ascontiguousarray, + asfortranarray, asin, asinh, atan, atanh, atan2, astype, atleast_1d, + atleast_2d, atleast_3d, base_repr, binary_repr, bitwise_and, + bitwise_count, bitwise_invert, bitwise_left_shift, bitwise_not, + bitwise_or, bitwise_right_shift, bitwise_xor, block, bool, bool_, + broadcast, busday_count, busday_offset, busdaycalendar, byte, bytes_, + can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, + complex128, complex64, complexfloating, compress, concat, concatenate, + conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, + count_nonzero, cross, csingle, cumprod, cumsum, + datetime64, datetime_as_string, datetime_data, deg2rad, degrees, + diagonal, divide, divmod, dot, double, dtype, e, einsum, einsum_path, + empty, empty_like, equal, errstate, euler_gamma, exp, exp2, expm1, + fabs, finfo, flatiter, flatnonzero, flexible, float16, float32, + float64, float_power, floating, floor, floor_divide, fmax, fmin, fmod, + format_float_positional, format_float_scientific, frexp, from_dlpack, + frombuffer, fromfile, fromfunction, fromiter, frompyfunc, fromstring, + full, full_like, gcd, generic, geomspace, get_printoptions, + getbufsize, geterr, geterrcall, greater, greater_equal, half, + heaviside, hstack, hypot, identity, iinfo, iinfo, indices, inexact, + inf, inner, int16, int32, int64, int8, int_, intc, integer, intp, + invert, is_busday, isclose, isdtype, isfinite, isfortran, isinf, + isnan, isnat, isscalar, issubdtype, lcm, ldexp, left_shift, less, + less_equal, lexsort, linspace, little_endian, log, log10, log1p, log2, + logaddexp, logaddexp2, logical_and, logical_not, logical_or, + logical_xor, logspace, long, longdouble, longlong, matmul, + matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, + min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, + ndim, nditer, negative, nested_iters, newaxis, nextafter, nonzero, + not_equal, number, object_, ones, ones_like, outer, partition, + permute_dims, pi, positive, pow, power, printoptions, prod, + promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, + reciprocal, record, remainder, repeat, require, reshape, resize, + result_type, right_shift, rint, roll, rollaxis, round, sctypeDict, + searchsorted, set_printoptions, setbufsize, seterr, seterrcall, shape, + shares_memory, short, sign, signbit, signedinteger, sin, single, sinh, + size, sort, spacing, sqrt, square, squeeze, stack, std, + str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, + timedelta64, trace, 
transpose, true_divide, trunc, typecodes, ubyte, + ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, + ulonglong, unsignedinteger, ushort, var, vdot, vecdot, void, vstack, + where, zeros, zeros_like + ) + + # NOTE: It's still under discussion whether these aliases + # should be removed. + for ta in ["float96", "float128", "complex192", "complex256"]: + try: + globals()[ta] = getattr(_core, ta) + except AttributeError: + pass + del ta + + from . import lib + from .lib import scimath as emath + from .lib._histograms_impl import ( + histogram, histogram_bin_edges, histogramdd + ) + from .lib._nanfunctions_impl import ( + nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, + nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd, + nansum, nanvar + ) + from .lib._function_base_impl import ( + select, piecewise, trim_zeros, copy, iterable, percentile, diff, + gradient, angle, unwrap, sort_complex, flip, rot90, extract, place, + vectorize, asarray_chkfinite, average, bincount, digitize, cov, + corrcoef, median, sinc, hamming, hanning, bartlett, blackman, + kaiser, trapezoid, trapz, i0, meshgrid, delete, insert, append, + interp, quantile + ) + from .lib._twodim_base_impl import ( + diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, + histogram2d, mask_indices, tril_indices, tril_indices_from, + triu_indices, triu_indices_from + ) + from .lib._shape_base_impl import ( + apply_over_axes, apply_along_axis, array_split, column_stack, dsplit, + dstack, expand_dims, hsplit, kron, put_along_axis, row_stack, split, + take_along_axis, tile, vsplit + ) + from .lib._type_check_impl import ( + iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, + real_if_close, typename, mintypecode, common_type + ) + from .lib._arraysetops_impl import ( + ediff1d, in1d, intersect1d, isin, setdiff1d, setxor1d, union1d, + unique, unique_all, unique_counts, unique_inverse, unique_values + ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._arraypad_impl import pad + from .lib._utils_impl import ( + show_runtime, get_include, info + ) + from .lib._stride_tricks_impl import ( + broadcast_arrays, broadcast_shapes, broadcast_to + ) + from .lib._polynomial_impl import ( + poly, polyint, polyder, polyadd, polysub, polymul, polydiv, polyval, + polyfit, poly1d, roots + ) + from .lib._npyio_impl import ( + savetxt, loadtxt, genfromtxt, load, save, savez, packbits, + savez_compressed, unpackbits, fromregex + ) + from .lib._index_tricks_impl import ( + diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate, + ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, + index_exp + ) + from . import matrixlib as _mat + from .matrixlib import ( + asmatrix, bmat, matrix + ) + + # public submodules are imported lazily, therefore are accessible from + # __getattr__. Note that `distutils` (deprecated) and `array_api` + # (experimental label) are not added here, because `from numpy import *` + # must not raise any warnings - that's too disruptive. + __numpy_submodules__ = { + "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "exceptions", "lib", "ctypeslib", "testing", "typing", + "f2py", "test", "rec", "char", "core", "strings", + } + + # We build warning messages for former attributes + _msg = ( + "module 'numpy' has no attribute '{n}'.\n" + "`np.{n}` was a deprecated alias for the builtin `{n}`. " + "To avoid this error in existing code, use `{n}` by itself. " + "Doing this will not modify any behavior and is safe. 
{extended_msg}\n" + "The alias was originally deprecated in NumPy 1.20; for more " + "details and guidance see the original release note at:\n" + " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + _specific_msg = ( + "If you specifically wanted the numpy scalar type, use `np.{}` here.") + + _int_extended_msg = ( + "When replacing `np.{}`, you may wish to use e.g. `np.int64` " + "or `np.int32` to specify the precision. If you wish to review " + "your current use, check the release note link for " + "additional information.") + + _type_info = [ + ("object", ""), # The NumPy scalar only exists by name. + ("float", _specific_msg.format("float64")), + ("complex", _specific_msg.format("complex128")), + ("str", _specific_msg.format("str_")), + ("int", _int_extended_msg.format("int"))] + + __former_attrs__ = { + n: _msg.format(n=n, extended_msg=extended_msg) + for n, extended_msg in _type_info + } + + + # Some of these could be defined right away, but most were aliases to + # the Python objects and only removed in NumPy 1.24. Defining them should + # probably wait for NumPy 1.26 or 2.0. + # When defined, these should possibly not be added to `__all__` to avoid + # import with `from numpy import *`. + __future_scalars__ = {"str", "bytes", "object"} + + __array_api_version__ = "2022.12" + + # Now that the numpy core module is imported, we can initialize the limits + _core.getlimits._register_known_types() + + __all__ = list( + __numpy_submodules__ | + set(_core.__all__) | + set(_mat.__all__) | + set(lib._histograms_impl.__all__) | + set(lib._nanfunctions_impl.__all__) | + set(lib._function_base_impl.__all__) | + set(lib._twodim_base_impl.__all__) | + set(lib._shape_base_impl.__all__) | + set(lib._type_check_impl.__all__) | + set(lib._arraysetops_impl.__all__) | + set(lib._ufunclike_impl.__all__) | + set(lib._arraypad_impl.__all__) | + set(lib._utils_impl.__all__) | + set(lib._stride_tricks_impl.__all__) | + set(lib._polynomial_impl.__all__) | + set(lib._npyio_impl.__all__) | + set(lib._index_tricks_impl.__all__) | + {"emath", "show_config", "__version__"} + ) + + # Filter out harmless warnings emitted by Cython + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + def __getattr__(attr): + # Warn for expired attributes + import warnings + + if attr == "linalg": + import numpy.linalg as linalg + return linalg + elif attr == "fft": + import numpy.fft as fft + return fft + elif attr == "dtypes": + import numpy.dtypes as dtypes + return dtypes + elif attr == "random": + import numpy.random as random + return random + elif attr == "polynomial": + import numpy.polynomial as polynomial + return polynomial + elif attr == "ma": + import numpy.ma as ma + return ma + elif attr == "ctypeslib": + import numpy.ctypeslib as ctypeslib + return ctypeslib + elif attr == "exceptions": + import numpy.exceptions as exceptions + return exceptions + elif attr == "testing": + import numpy.testing as testing + return testing + elif attr == "matlib": + import numpy.matlib as matlib + return matlib + elif attr == "f2py": + import numpy.f2py as f2py + return f2py + elif attr == "typing": + import numpy.typing as typing + return typing + elif attr == "rec": + import numpy.rec as rec + return rec + elif attr == "char": + import numpy.char as char + return char + elif attr == "array_api": + raise AttributeError("`numpy.array_api` is not available from " + "numpy
2.0 onwards") + elif attr == "core": + import numpy.core as core + return core + elif attr == "strings": + import numpy.strings as strings + return strings + elif attr == "distutils": + if 'distutils' in __numpy_submodules__: + import numpy.distutils as distutils + return distutils + else: + raise AttributeError("`numpy.distutils` is not available from " + "Python 3.12 onwards") + + if attr in __future_scalars__: + # And future warnings for those that will change, but also give + # the AttributeError + warnings.warn( + f"In the future `np.{attr}` will be defined as the " + "corresponding NumPy scalar.", FutureWarning, stacklevel=2) + + if attr in __former_attrs__: + raise AttributeError(__former_attrs__[attr]) + + if attr in __expired_attributes__: + raise AttributeError( + f"`np.{attr}` was removed in the NumPy 2.0 release. " + f"{__expired_attributes__[attr]}" + ) + + if attr == "chararray": + warnings.warn( + "`np.chararray` is deprecated and will be removed from " + "the main namespace in the future. Use an array with a string " + "or bytes dtype instead.", DeprecationWarning, stacklevel=2) + import numpy.char as char + return char.chararray + + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + + def __dir__(): + public_symbols = ( + globals().keys() | __numpy_submodules__ + ) + public_symbols -= { + "matrixlib", "matlib", "tests", "conftest", "version", + "compat", "distutils", "array_api" + } + return list(public_symbols) + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases e.g. with wrong BLAS ABI that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - float32(2.0)) < 1e-5: + raise AssertionError() + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused for example " + "by incorrect BLAS library being linked in, or by mixing " + "package managers (pip, conda, apt, ...). Search closed " + "numpy issues for similar problems.") + raise RuntimeError(msg.format(__file__)) from None + + _sanity_check() + del _sanity_check + + def _mac_os_check(): + """ + Quick Sanity check for Mac OS look for accelerate build bugs. + Testing numpy polyfit calls init_dgelsd(LAPACK) + """ + try: + c = array([3., 2., 1.]) + x = linspace(0, 2, 5) + y = polyval(c, x) + _ = polyfit(x, y, 2, cov=True) + except ValueError: + pass + + if sys.platform == "darwin": + from . import exceptions + with warnings.catch_warnings(record=True) as w: + _mac_os_check() + # Throw runtime error, if the test failed Check for warning and error_message + if len(w) > 0: + for _wn in w: + if _wn.category is exceptions.RankWarning: + # Ignore other warnings, they may not be relevant (see gh-25433). + error_message = f"{_wn.category.__name__}: {str(_wn.message)}" + msg = ( + "Polyfit sanity test emitted a warning, most likely due " + "to using a buggy Accelerate backend." 
+ "\nIf you compiled yourself, more information is available at:" + "\nhttps://numpy.org/devdocs/building/index.html" + "\nOtherwise report this to the vendor " + "that provided NumPy.\n\n{}\n".format(error_message)) + raise RuntimeError(msg) + del _wn + del w + del _mac_os_check + + def hugepage_setup(): + """ + We usually use madvise hugepages support, but on some old kernels it + is slow and thus better avoided. Specifically kernel version 4.6 + had a bug fix which probably fixed this: + https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff + """ + use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None) + if sys.platform == "linux" and use_hugepage is None: + # If there is an issue with parsing the kernel version, + # set use_hugepage to 0. Usage of LooseVersion will handle + # the kernel version parsing better, but avoided since it + # will increase the import time. + # See: #16679 for related discussion. + try: + use_hugepage = 1 + kernel_version = os.uname().release.split(".")[:2] + kernel_version = tuple(int(v) for v in kernel_version) + if kernel_version < (4, 6): + use_hugepage = 0 + except ValueError: + use_hugepage = 0 + elif use_hugepage is None: + # This is not Linux, so it should not matter, just enable anyway + use_hugepage = 1 + else: + use_hugepage = int(use_hugepage) + return use_hugepage + + # Note that this will currently only make a difference on Linux + _core.multiarray._set_madvise_hugepage(hugepage_setup()) + del hugepage_setup + + # Give a warning if NumPy is reloaded or imported on a sub-interpreter + # We do this from python, since the C-module may not be reloaded and + # it is tidier organized. + _core.multiarray._multiarray_umath._reload_guard() + + # TODO: Remove the environment variable entirely now that it is "weak" + _core._set_promotion_state( + os.environ.get("NPY_PROMOTION_STATE", "weak")) + + # Tell PyInstaller where to find hook-numpy.py + def _pyinstaller_hooks_dir(): + from pathlib import Path + return [str(Path(__file__).with_name("_pyinstaller").resolve())] + + +# Remove symbols imported for internal use +del os, sys, warnings \ No newline at end of file diff --git a/phivenv/Lib/site-packages/numpy/__init__.pyi b/phivenv/Lib/site-packages/numpy/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d38d291d661de02f144f27a156b4f5ba8c19253d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/__init__.pyi @@ -0,0 +1,3915 @@ +import builtins +import sys +import os +import mmap +import ctypes as ct +import array as _array +import datetime as dt +import enum +from abc import abstractmethod +from types import TracebackType, MappingProxyType, GenericAlias +from contextlib import contextmanager + +import numpy as np +from numpy._pytesttester import PytestTester +from numpy._core._internal import _ctypes + +from numpy._typing import ( + # Arrays + ArrayLike, + NDArray, + _SupportsArray, + _NestedSequence, + _FiniteNestedSequence, + _SupportsArray, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeBytes_co, + _ArrayLikeUnknown, + _UnknownType, + + # DTypes + DTypeLike, + _DTypeLike, + _DTypeLikeVoid, + _SupportsDType, + _VoidDTypeLike, + + # Shapes + _Shape, + _ShapeLike, + + # Scalars + _CharLike_co, + _IntLike_co, + _FloatLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + + # `number` precision + NBitBase, + # 
NOTE: Do not remove the extended precision bit-types even if seemingly unused; + # they're used by the mypy plugin + _256Bit, + _128Bit, + _96Bit, + _80Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitInt, + _NBitLong, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _LongCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _ULongCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +from numpy._typing._callable import ( + _BoolOp, + _BoolBitOp, + _BoolSub, + _BoolTrueDiv, + _BoolMod, + _BoolDivMod, + _TD64Div, + _IntTrueDiv, + _UnsignedIntOp, + _UnsignedIntBitOp, + _UnsignedIntMod, + _UnsignedIntDivMod, + _SignedIntOp, + _SignedIntBitOp, + _SignedIntMod, + _SignedIntDivMod, + _FloatOp, + _FloatMod, + _FloatDivMod, + _ComplexOp, + _NumberOp, + _ComparisonOp, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable +# to the specific platform +from numpy._typing._extended_precision import ( + uint128 as uint128, + uint256 as uint256, + int128 as int128, + int256 as int256, + float80 as float80, + float96 as float96, + float128 as float128, + float256 as float256, + complex160 as complex160, + complex192 as complex192, + complex256 as complex256, + complex512 as complex512, +) + +from collections.abc import ( + Callable, + Iterable, + Iterator, + Mapping, + Sequence, +) +from typing import ( + Literal as L, + Any, + Generator, + Generic, + NoReturn, + overload, + SupportsComplex, + SupportsFloat, + SupportsInt, + TypeVar, + Protocol, + SupportsIndex, + Final, + final, + ClassVar, +) + +# Ensures that the stubs are picked up +from numpy import ( + ctypeslib as ctypeslib, + exceptions as exceptions, + fft as fft, + lib as lib, + linalg as linalg, + ma as ma, + polynomial as polynomial, + random as random, + testing as testing, + version as version, + exceptions as exceptions, + dtypes as dtypes, + rec as rec, + char as char, + strings as strings, +) + +from numpy._core.records import ( + record as record, + recarray as recarray, +) + +from numpy._core.defchararray import ( + chararray as chararray, +) + +from numpy._core.function_base import ( + linspace as linspace, + logspace as logspace, + geomspace as geomspace, +) + +from numpy._core.fromnumeric import ( + take as take, + reshape as reshape, + choose as choose, + repeat as repeat, + put as put, + swapaxes as swapaxes, + transpose as transpose, + matrix_transpose as matrix_transpose, + partition as partition, + argpartition as argpartition, + sort as sort, + argsort as argsort, + argmax as argmax, + argmin as argmin, + searchsorted as searchsorted, + resize as resize, + squeeze as squeeze, + diagonal as diagonal, + trace as trace, + ravel as ravel, + nonzero as nonzero, + shape as shape, + compress as compress, + clip as clip, + sum as sum, + all as all, + any as any, + cumsum as cumsum, + ptp as ptp, + 
max as max, + min as min, + amax as amax, + amin as amin, + prod as prod, + cumprod as cumprod, + ndim as ndim, + size as size, + around as around, + round as round, + mean as mean, + std as std, + var as var, +) + +from numpy._core._asarray import ( + require as require, +) + +from numpy._core._type_aliases import ( + sctypeDict as sctypeDict, +) + +from numpy._core._ufunc_config import ( + seterr as seterr, + geterr as geterr, + setbufsize as setbufsize, + getbufsize as getbufsize, + seterrcall as seterrcall, + geterrcall as geterrcall, + _ErrKind, + _ErrFunc, +) + +from numpy._core.arrayprint import ( + set_printoptions as set_printoptions, + get_printoptions as get_printoptions, + array2string as array2string, + format_float_scientific as format_float_scientific, + format_float_positional as format_float_positional, + array_repr as array_repr, + array_str as array_str, + printoptions as printoptions, +) + +from numpy._core.einsumfunc import ( + einsum as einsum, + einsum_path as einsum_path, +) + +from numpy._core.multiarray import ( + array as array, + empty_like as empty_like, + empty as empty, + zeros as zeros, + concatenate as concatenate, + inner as inner, + where as where, + lexsort as lexsort, + can_cast as can_cast, + min_scalar_type as min_scalar_type, + result_type as result_type, + dot as dot, + vdot as vdot, + bincount as bincount, + copyto as copyto, + putmask as putmask, + packbits as packbits, + unpackbits as unpackbits, + shares_memory as shares_memory, + may_share_memory as may_share_memory, + asarray as asarray, + asanyarray as asanyarray, + ascontiguousarray as ascontiguousarray, + asfortranarray as asfortranarray, + arange as arange, + busday_count as busday_count, + busday_offset as busday_offset, + datetime_as_string as datetime_as_string, + datetime_data as datetime_data, + frombuffer as frombuffer, + fromfile as fromfile, + fromiter as fromiter, + is_busday as is_busday, + promote_types as promote_types, + fromstring as fromstring, + frompyfunc as frompyfunc, + nested_iters as nested_iters, + flagsobj, +) + +from numpy._core.numeric import ( + zeros_like as zeros_like, + ones as ones, + ones_like as ones_like, + full as full, + full_like as full_like, + count_nonzero as count_nonzero, + isfortran as isfortran, + argwhere as argwhere, + flatnonzero as flatnonzero, + correlate as correlate, + convolve as convolve, + outer as outer, + tensordot as tensordot, + vecdot as vecdot, + roll as roll, + rollaxis as rollaxis, + moveaxis as moveaxis, + cross as cross, + indices as indices, + fromfunction as fromfunction, + isscalar as isscalar, + binary_repr as binary_repr, + base_repr as base_repr, + identity as identity, + allclose as allclose, + isclose as isclose, + array_equal as array_equal, + array_equiv as array_equiv, + astype as astype, +) + +from numpy._core.numerictypes import ( + isdtype as isdtype, + issubdtype as issubdtype, + cast as cast, + ScalarType as ScalarType, + typecodes as typecodes, +) + +from numpy._core.shape_base import ( + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + block as block, + hstack as hstack, + stack as stack, + vstack as vstack, +) + +from numpy.lib import ( + scimath as emath, +) + +from numpy.lib._arraypad_impl import ( + pad as pad, +) + +from numpy.lib._arraysetops_impl import ( + ediff1d as ediff1d, + intersect1d as intersect1d, + isin as isin, + setdiff1d as setdiff1d, + setxor1d as setxor1d, + union1d as union1d, + unique as unique, + unique_all as unique_all, + unique_counts as 
unique_counts, + unique_inverse as unique_inverse, + unique_values as unique_values, +) + +from numpy.lib._function_base_impl import ( + select as select, + piecewise as piecewise, + trim_zeros as trim_zeros, + copy as copy, + iterable as iterable, + percentile as percentile, + diff as diff, + gradient as gradient, + angle as angle, + unwrap as unwrap, + sort_complex as sort_complex, + disp as disp, + flip as flip, + rot90 as rot90, + extract as extract, + place as place, + asarray_chkfinite as asarray_chkfinite, + average as average, + bincount as bincount, + digitize as digitize, + cov as cov, + corrcoef as corrcoef, + median as median, + sinc as sinc, + hamming as hamming, + hanning as hanning, + bartlett as bartlett, + blackman as blackman, + kaiser as kaiser, + i0 as i0, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as interp, + quantile as quantile, +) + +from numpy.lib._histograms_impl import ( + histogram_bin_edges as histogram_bin_edges, + histogram as histogram, + histogramdd as histogramdd, +) + +from numpy.lib._index_tricks_impl import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + +from numpy.lib._nanfunctions_impl import ( + nansum as nansum, + nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib._npyio_impl import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + load as load, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, +) + +from numpy.lib._polynomial_impl import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as polydiv, + polyval as polyval, + polyfit as polyfit, +) + +from numpy.lib._shape_base_impl import ( + column_stack as column_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib._stride_tricks_impl import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib._twodim_base_impl import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib._type_check_impl import ( + mintypecode as mintypecode, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + 
nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + +from numpy.lib._ufunclike_impl import ( + fix as fix, + isposinf as isposinf, + isneginf as isneginf, +) + +from numpy.lib._utils_impl import ( + get_include as get_include, + info as info, + show_runtime as show_runtime, +) + +from numpy.matrixlib import ( + asmatrix as asmatrix, + bmat as bmat, +) + +_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True) + +# Protocol for representing file-like-objects accepted +# by `ndarray.tofile` and `fromfile` +class _IOProtocol(Protocol): + def flush(self) -> object: ... + def fileno(self) -> int: ... + def tell(self) -> SupportsIndex: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +# NOTE: `seek`, `write` and `flush` are technically only required +# for `readwrite`/`write` modes +class _MemMapIOProtocol(Protocol): + def flush(self) -> object: ... + def fileno(self) -> SupportsIndex: ... + def tell(self) -> int: ... + def seek(self, offset: int, whence: int, /) -> object: ... + def write(self, s: bytes, /) -> object: ... + @property + def read(self) -> object: ... + +class _SupportsWrite(Protocol[_AnyStr_contra]): + def write(self, s: _AnyStr_contra, /) -> object: ... + +__all__: list[str] +__dir__: list[str] +__version__: str +__git_version__: str +__array_api_version__: str +test: PytestTester + +# TODO: Move placeholders to their respective module once +# their annotations are properly implemented +# +# Placeholders for classes + +def show_config() -> None: ... + +_NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) +_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) +_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] + +@final +class dtype(Generic[_DTypeScalar_co]): + names: None | tuple[builtins.str, ...] + def __hash__(self) -> int: ... + # Overload for subclass of generic + @overload + def __new__( + cls, + dtype: type[_DTypeScalar_co], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_DTypeScalar_co]: ... + # Overloads for string aliases, Python types, and some assorted + # other special cases. Order is sometimes important because of the + # subtype relationships + # + # builtins.bool < int < float < complex < object + # + # so we have to make sure the overloads for the narrowest type is + # first. + # Builtin types + @overload + def __new__(cls, dtype: type[builtins.bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... + @overload + def __new__(cls, dtype: type[int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int_]: ... + @overload + def __new__(cls, dtype: None | type[float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: type[complex], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: type[builtins.str], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: type[bytes], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... 
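+ # (Editorial sketch, hedged; not part of the upstream stub.) The overloads
+ # above give type checkers a precise builtin-type -> dtype mapping, e.g.:
+ #
+ #     d1: np.dtype[np.float64] = np.dtype(float)      # type[float] overload
+ #     d2: np.dtype[np.complex128] = np.dtype(complex) # type[complex] overload
+ #     d3: np.dtype[np.str_] = np.dtype(str)           # type[builtins.str] overload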
+ + # `unsignedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... + @overload + def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... + @overload + def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... + @overload + def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... + @overload + def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... + @overload + def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... + @overload + def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... + + # NOTE: We're assuming here that `uint_ptr_t == size_t`, + # an assumption that does not hold in rare cases (same for `ssize_t`) + @overload + def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + @overload + def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + @overload + def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + + # `signedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... + @overload + def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... + @overload + def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... + @overload + def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... + @overload + def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + @overload + def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + @overload + def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... 
+ @overload + def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + @overload + def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + @overload + def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... + @overload + def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + @overload + def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... + @overload + def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + @overload + def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + @overload + def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations and ctypes + @overload + def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... + @overload + def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... + @overload + def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... + @overload + def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) 
-> dtype[str_]: ... + @overload + def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... + @overload + def __new__(cls, dtype: _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + @overload + def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + + # dtype of a dtype is the same dtype + @overload + def __new__( + cls, + dtype: dtype[_DTypeScalar_co], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_DTypeScalar_co]: ... + @overload + def __new__( + cls, + dtype: _SupportsDType[dtype[_DTypeScalar_co]], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_DTypeScalar_co]: ... + # Handle strings that can't be expressed as literals; i.e. s1, s2, ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[Any]: ... + # Catchall overload for void-likes + @overload + def __new__( + cls, + dtype: _VoidDTypeLike, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[void]: ... + # Catchall overload for object-likes + @overload + def __new__( + cls, + dtype: type[object], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_]: ... + + def __class_getitem__(self, item: Any) -> GenericAlias: ... + + @overload + def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ... + + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + @overload + def __mul__(self: _DType, value: L[1]) -> _DType: ... + @overload + def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + @overload + def __mul__(self, value: SupportsIndex) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for + # now for non-flexible dtypes. + @overload + def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + @overload + def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ... + + def __gt__(self, other: DTypeLike) -> builtins.bool: ... + def __ge__(self, other: DTypeLike) -> builtins.bool: ... + def __lt__(self, other: DTypeLike) -> builtins.bool: ... + def __le__(self, other: DTypeLike) -> builtins.bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any) -> builtins.bool: ... + def __ne__(self, other: Any) -> builtins.bool: ... + + @property + def alignment(self) -> int: ... + @property + def base(self) -> dtype[Any]: ... + @property + def byteorder(self) -> builtins.str: ... + @property + def char(self) -> builtins.str: ... + @property + def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ... 
+ @property + def fields( + self, + ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> builtins.bool: ... + @property + def isbuiltin(self) -> int: ... + @property + def isnative(self) -> builtins.bool: ... + @property + def isalignedstruct(self) -> builtins.bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> builtins.str: ... + @property + def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ... + @property + def name(self) -> builtins.str: ... + @property + def num(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... + def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... + @property + def str(self) -> builtins.str: ... + @property + def type(self) -> type[_DTypeScalar_co]: ... + +_ArrayLikeInt = ( + int + | integer[Any] + | Sequence[int | integer[Any]] + | Sequence[Sequence[Any]] # TODO: wait for support for recursive types + | NDArray[Any] +) + +_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) + +@final +class flatiter(Generic[_NdArraySubClass]): + __hash__: ClassVar[None] + @property + def base(self) -> _NdArraySubClass: ... + @property + def coords(self) -> _Shape: ... + @property + def index(self) -> int: ... + def copy(self) -> _NdArraySubClass: ... + def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... + def __next__(self: flatiter[NDArray[_ScalarType]]) -> _ScalarType: ... + def __len__(self) -> int: ... + @overload + def __getitem__( + self: flatiter[NDArray[_ScalarType]], + key: int | integer[Any] | tuple[int | integer[Any]], + ) -> _ScalarType: ... + @overload + def __getitem__( + self, + key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], + ) -> _NdArraySubClass: ... + # TODO: `__setitem__` operates via `unsafe` casting rules, and can + # thus accept any type accepted by the relevant underlying `np.generic` + # constructor. + # This means that `value` must in reality be a supertype of `npt.ArrayLike`. + def __setitem__( + self, + key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], + value: Any, + ) -> None: ... + @overload + def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... + +_OrderKACF = L[None, "K", "A", "C", "F"] +_OrderACF = L[None, "A", "C", "F"] +_OrderCF = L[None, "C", "F"] + +_ModeKind = L["raise", "wrap", "clip"] +_PartitionKind = L["introselect"] +_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] +_SortSide = L["left", "right"] + +_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) + +class _ArrayOrScalarCommon: + @property + def T(self: _ArraySelf) -> _ArraySelf: ... + @property + def mT(self: _ArraySelf) -> _ArraySelf: ... + @property + def data(self) -> memoryview: ... + @property + def flags(self) -> flagsobj: ... + @property + def itemsize(self) -> int: ... + @property + def nbytes(self) -> int: ... + def __bool__(self) -> builtins.bool: ... + def __bytes__(self) -> bytes: ... + def __str__(self) -> str: ... + def __repr__(self) -> str: ... + def __copy__(self: _ArraySelf) -> _ArraySelf: ... + def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ... 
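+ # (Editorial note, hedged; not part of the upstream stub.) The recurring
+ # `self: _ArraySelf ... -> _ArraySelf` annotation is the pre-`typing.Self`
+ # idiom for "returns the same (sub)class it was called on", e.g.:
+ #
+ #     m = np.asmatrix([[1, 2]])
+ #     m2 = m.copy()   # checkers infer np.matrix here, not a bare ndarray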
+ + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... + def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ... + def dumps(self) -> bytes: ... + def tobytes(self, order: _OrderKACF = ...) -> bytes: ... + # NOTE: `tostring()` is deprecated and therefore excluded + # def tostring(self, order=...): ... + def tofile( + self, + fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol, + sep: str = ..., + format: str = ..., + ) -> None: ... + # generics and 0d arrays return builtin scalars + def tolist(self) -> Any: ... + + @property + def __array_interface__(self) -> dict[str, Any]: ... + @property + def __array_priority__(self) -> float: ... + @property + def __array_struct__(self) -> Any: ... # builtins.PyCapsule + def __setstate__(self, state: tuple[ + SupportsIndex, # version + _ShapeLike, # Shape + _DType_co, # DType + np.bool, # F-continuous + bytes | list[Any], # Data + ], /) -> None: ... + # an `np.bool` is returned when `keepdims=True` and `self` is a 0d array + + @overload + def all( + self, + axis: None = ..., + out: None = ..., + keepdims: L[False] = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> np.bool: ... + @overload + def all( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def all( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def any( + self, + axis: None = ..., + out: None = ..., + keepdims: L[False] = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> np.bool: ... + @overload + def any( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def any( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def argmax( + self, + axis: None = ..., + out: None = ..., + *, + keepdims: L[False] = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex = ..., + out: None = ..., + *, + keepdims: builtins.bool = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + *, + keepdims: builtins.bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def argmin( + self, + axis: None = ..., + out: None = ..., + *, + keepdims: L[False] = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex = ..., + out: None = ..., + *, + keepdims: builtins.bool = ..., + ) -> Any: ... + @overload + def argmin( + self, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + *, + keepdims: builtins.bool = ..., + ) -> _NdArraySubClass: ... + + def argsort( + self, + axis: None | SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., + *, + stable: None | bool = ..., + ) -> NDArray[Any]: ... + + @overload + def choose( + self, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., + ) -> NDArray[Any]: ... 
+ @overload + def choose( + self, + choices: ArrayLike, + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + @overload + def clip( + self, + min: ArrayLike = ..., + max: None | ArrayLike = ..., + out: None = ..., + **kwargs: Any, + ) -> NDArray[Any]: ... + @overload + def clip( + self, + min: None = ..., + max: ArrayLike = ..., + out: None = ..., + **kwargs: Any, + ) -> NDArray[Any]: ... + @overload + def clip( + self, + min: ArrayLike = ..., + max: None | ArrayLike = ..., + out: _NdArraySubClass = ..., + **kwargs: Any, + ) -> _NdArraySubClass: ... + @overload + def clip( + self, + min: None = ..., + max: ArrayLike = ..., + out: _NdArraySubClass = ..., + **kwargs: Any, + ) -> _NdArraySubClass: ... + + @overload + def compress( + self, + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: None = ..., + ) -> NDArray[Any]: ... + @overload + def compress( + self, + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + def conj(self: _ArraySelf) -> _ArraySelf: ... + + def conjugate(self: _ArraySelf) -> _ArraySelf: ... + + @overload + def cumprod( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[Any]: ... + @overload + def cumprod( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def cumsum( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[Any]: ... + @overload + def cumsum( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def max( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def max( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def mean( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def mean( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def min( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def min( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def prod( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def prod( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... 
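+ # (Editorial note, hedged; not part of the upstream stub.) The reductions in
+ # this class come in paired overloads: with `out=None` the result type is
+ # loose (`Any`), while passing an ndarray subclass as `out=` propagates that
+ # subclass, e.g. for some array `a`:
+ #
+ #     acc = np.empty((), dtype=np.float64)
+ #     total = a.sum(out=acc)   # checkers type the result like `acc`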
+ + @overload + def round( + self: _ArraySelf, + decimals: SupportsIndex = ..., + out: None = ..., + ) -> _ArraySelf: ... + @overload + def round( + self, + decimals: SupportsIndex = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def std( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def std( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ddof: float = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def sum( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def sum( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: builtins.bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def var( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def var( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ddof: float = ..., + keepdims: builtins.bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) +_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_ShapeType2 = TypeVar("_ShapeType2", bound=Any) +_NumberType = TypeVar("_NumberType", bound=number[Any]) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) +_2Tuple = tuple[_T, _T] +_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"] + +_ArrayUInt_co = NDArray[np.bool | unsignedinteger[Any]] +_ArrayInt_co = NDArray[np.bool | integer[Any]] +_ArrayFloat_co = NDArray[np.bool | integer[Any] | floating[Any]] +_ArrayComplex_co = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] +_ArrayNumber_co = NDArray[np.bool | number[Any]] +_ArrayTD64_co = NDArray[np.bool | integer[Any] | timedelta64] + +# Introduce an alias for `dtype` to avoid naming conflicts. +_dtype = dtype + +# `builtins.PyCapsule` unfortunately lacks annotations as of the moment; +# use `Any` as a stopgap measure +_PyCapsule = Any + +class _SupportsItem(Protocol[_T_co]): + def item(self, args: Any, /) -> _T_co: ... + +class _SupportsReal(Protocol[_T_co]): + @property + def real(self) -> _T_co: ... + +class _SupportsImag(Protocol[_T_co]): + @property + def imag(self) -> _T_co: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): + __hash__: ClassVar[None] + @property + def base(self) -> None | NDArray[Any]: ... + @property + def ndim(self) -> int: ... 
+ @property + def size(self) -> int: ... + @property + def real( + self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + @real.setter + def real(self, value: ArrayLike) -> None: ... + @property + def imag( + self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + @imag.setter + def imag(self, value: ArrayLike) -> None: ... + def __new__( + cls: type[_ArraySelf], + shape: _ShapeLike, + dtype: DTypeLike = ..., + buffer: None | _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: None | _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _ArraySelf: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + def __class_getitem__(self, item: Any) -> GenericAlias: ... + + @overload + def __array__( + self, dtype: None = ..., /, *, copy: None | bool = ... + ) -> ndarray[Any, _DType_co]: ... + @overload + def __array__( + self, dtype: _DType, /, *, copy: None | bool = ... + ) -> ndarray[Any, _DType]: ... + + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + + def __array_function__( + self, + func: Callable[..., Any], + types: Iterable[type], + args: Iterable[Any], + kwargs: Mapping[str, Any], + ) -> Any: ... + + # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` + # is a pseudo-abstract method the type has been narrowed down in order to + # grant subclasses a bit more flexibility + def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ... + + def __array_wrap__( + self, + array: ndarray[_ShapeType2, _DType], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + return_scalar: builtins.bool = ..., + /, + ) -> ndarray[_ShapeType2, _DType]: ... + + @overload + def __getitem__(self, key: ( + NDArray[integer[Any]] + | NDArray[np.bool] + | tuple[NDArray[integer[Any]] | NDArray[np.bool], ...] + )) -> ndarray[Any, _DType_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...]) -> Any: ... + @overload + def __getitem__(self, key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> ndarray[Any, _DType_co]: ... + @overload + def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ... + + @property + def ctypes(self) -> _ctypes[int]: ... + @property + def shape(self) -> _Shape: ... + @shape.setter + def shape(self, value: _ShapeLike) -> None: ... + @property + def strides(self) -> _Shape: ... + @strides.setter + def strides(self, value: _ShapeLike) -> None: ... + def byteswap(self: _ArraySelf, inplace: builtins.bool = ...) -> _ArraySelf: ... + def fill(self, value: Any) -> None: ... + @property + def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... + + # Use the same output type as that of the underlying `generic` + @overload + def item( + self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] + *args: SupportsIndex, + ) -> _T: ... + @overload + def item( + self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] + args: tuple[SupportsIndex, ...], + /, + ) -> _T: ... 
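+ # (Editorial note, hedged; not part of the upstream stub.) Via the
+ # `_SupportsItem` protocol bound above, `.item()` can resolve to the builtin
+ # scalar matching the array's dtype, e.g. (assuming `import numpy.typing as npt`):
+ #
+ #     arr: npt.NDArray[np.float64] = np.array([1.5])
+ #     x = arr.item()   # checkers may infer builtins.float here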
+ + @overload + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + @overload + def resize(self, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... + + def setflags( + self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ... + ) -> None: ... + + def squeeze( + self, + axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ..., + ) -> ndarray[Any, _DType_co]: ... + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + ) -> ndarray[Any, _DType_co]: ... + + @overload + def transpose(self: _ArraySelf, axes: None | _ShapeLike, /) -> _ArraySelf: ... + @overload + def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... + + def argpartition( + self, + kth: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., + ) -> NDArray[intp]: ... + + def diagonal( + self, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + ) -> ndarray[Any, _DType_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + @overload + def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ... + + # `nonzero()` is deprecated for 0d arrays/generics + def nonzero(self) -> tuple[NDArray[intp], ...]: ... + + def partition( + self, + kth: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., + ) -> None: ... + + # `put` is technically available to `generic`, + # but is pointless as `generic`s are immutable + def put( + self, + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., + ) -> None: ... + + @overload + def searchsorted( # type: ignore[misc] + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + side: _SortSide = ..., + sorter: None | _ArrayLikeInt_co = ..., + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + side: _SortSide = ..., + sorter: None | _ArrayLikeInt_co = ..., + ) -> NDArray[intp]: ... + + def setfield( + self, + val: ArrayLike, + dtype: DTypeLike, + offset: SupportsIndex = ..., + ) -> None: ... + + def sort( + self, + axis: SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., + *, + stable: None | bool = ..., + ) -> None: ... + + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def take( # type: ignore[misc] + self: NDArray[_ScalarType], + indices: _IntLike_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, _DType_co]: ... 
+ @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + ) -> ndarray[Any, _DType_co]: ... + + def flatten( + self, + order: _OrderKACF = ..., + ) -> ndarray[Any, _DType_co]: ... + + def ravel( + self, + order: _OrderKACF = ..., + ) -> ndarray[Any, _DType_co]: ... + + @overload + def reshape( + self, shape: _ShapeLike, /, *, order: _OrderACF = ... + ) -> ndarray[Any, _DType_co]: ... + @overload + def reshape( + self, *shape: SupportsIndex, order: _OrderACF = ... + ) -> ndarray[Any, _DType_co]: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarType], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> NDArray[_ScalarType]: ... + @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> NDArray[Any]: ... + + @overload + def view(self: _ArraySelf) -> _ArraySelf: ... + @overload + def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ... + @overload + def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ... + @overload + def view(self, dtype: DTypeLike) -> NDArray[Any]: ... + @overload + def view( + self, + dtype: DTypeLike, + type: type[_NdArraySubClass], + ) -> _NdArraySubClass: ... + + @overload + def getfield( + self, + dtype: _DTypeLike[_ScalarType], + offset: SupportsIndex = ... + ) -> NDArray[_ScalarType]: ... + @overload + def getfield( + self, + dtype: DTypeLike, + offset: SupportsIndex = ... + ) -> NDArray[Any]: ... + + # Dispatch to the underlying `generic` via protocols + def __int__( + self: NDArray[SupportsInt], # type: ignore[type-var] + ) -> int: ... + + def __float__( + self: NDArray[SupportsFloat], # type: ignore[type-var] + ) -> float: ... + + def __complex__( + self: NDArray[SupportsComplex], # type: ignore[type-var] + ) -> complex: ... + + def __index__( + self: NDArray[SupportsIndex], # type: ignore[type-var] + ) -> int: ... + + def __len__(self) -> int: ... + def __setitem__(self, key, value): ... + def __iter__(self) -> Any: ... + def __contains__(self, key) -> builtins.bool: ... + + # The last overload is for catching recursive objects whose + # nesting is too deep. + # The first overload is for catching `bytes` (as they are a subtype of + # `Sequence[int]`) and `str`. As `str` is a recursive sequence of + # strings, it will pass through the final overload otherwise + + @overload + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + @overload + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + + @overload + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + @overload + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... 
+ @overload + def __le__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + + @overload + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + @overload + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + + # Unary ops + @overload + def __abs__(self: NDArray[_UnknownType]) -> NDArray[Any]: ... + @overload + def __abs__(self: NDArray[np.bool]) -> NDArray[np.bool]: ... + @overload + def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ... + @overload + def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + @overload + def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + @overload + def __abs__(self: NDArray[object_]) -> Any: ... + + @overload + def __invert__(self: NDArray[_UnknownType]) -> NDArray[Any]: ... + @overload + def __invert__(self: NDArray[np.bool]) -> NDArray[np.bool]: ... + @overload + def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ... + @overload + def __invert__(self: NDArray[object_]) -> Any: ... + + @overload + def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + @overload + def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + @overload + def __pos__(self: NDArray[object_]) -> Any: ... + + @overload + def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + @overload + def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + @overload + def __neg__(self: NDArray[object_]) -> Any: ... + + # Binary ops + @overload + def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __matmul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... 
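+ # Illustrative example added for clarity (not part of the upstream stub):
+ # the `__matmul__` overloads above encode the usual dtype-promotion ladder,
+ # e.g. an integer array matmul'd with a float array yields a float64 result:
+ #
+ #   >>> import numpy as np
+ #   >>> (np.arange(3) @ np.ones(3)).dtype
+ #   dtype('float64')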
+ + @overload + def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __mod__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __rmod__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + @overload + def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... 
# type: ignore[misc] + @overload + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + @overload + def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + @overload + def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + @overload + def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __radd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... 
+ @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc] + @overload + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... 
+ @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ... + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __pow__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + @overload + def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... 
# type: ignore[misc] + @overload + def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + @overload + def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ... + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __lshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rlshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... 
# type: ignore[misc] + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rrshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __and__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rand__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __xor__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + + @overload + def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rxor__(self: NDArray[object_], other: Any) -> Any: ... 
+ @overload
+ def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __or__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __ror__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ # `np.generic` does not support inplace operations
+
+ # NOTE: Inplace ops generally use "same_kind" casting w.r.t. the left
+ # operand. An exception to this rule is unsigned integers, which also
+ # accept a signed integer for the right operand, as long as it is a 0D
+ # object and its value is >= 0 (an illustrative example follows the
+ # `__ior__` overloads below)
+ @overload
+ def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ...
+ @overload
+ def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload + def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + @overload + def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + @overload + def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + @overload + def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... 
+ @overload + def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + @overload + def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + @overload + def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + @overload + def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... 
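+ # Illustrative example of the "same_kind" casting rule for in-place ops
+ # noted above (added for clarity, not part of the upstream stub):
+ #
+ #   >>> import numpy as np
+ #   >>> a = np.zeros(3, dtype=np.int64)
+ #   >>> a += 2        # fine: a Python int fits the int64 operand
+ #   >>> a += 1.5      # fails: float64 -> int64 is not a "same_kind" cast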
+ + @overload + def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + @overload + def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + @overload + def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... + def __dlpack_device__(self) -> tuple[int, L[0]]: ... + + def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... + + def to_device(self, device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... + + @property + def device(self) -> L["cpu"]: ... + + def bitwise_count( + self, + out: None | NDArray[Any] = ..., + *, + where: _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: builtins.bool = ..., + ) -> NDArray[Any]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DType_co: ... + +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. + +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. + +_ScalarType = TypeVar("_ScalarType", bound=generic) +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +class generic(_ArrayOrScalarCommon): + @abstractmethod + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + @overload + def __array__(self: _ScalarType, dtype: None = ..., /) -> NDArray[_ScalarType]: ... + @overload + def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... + def __hash__(self) -> int: ... + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def strides(self) -> tuple[()]: ... + def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... + @property + def flat(self: _ScalarType) -> flatiter[NDArray[_ScalarType]]: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarType], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> _ScalarType: ... + @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> Any: ... + + # NOTE: `view` will perform a 0D->scalar cast, + # thus the array `type` is irrelevant to the output type + @overload + def view( + self: _ScalarType, + type: type[NDArray[Any]] = ..., + ) -> _ScalarType: ... 
+ @overload + def view( + self, + dtype: _DTypeLike[_ScalarType], + type: type[NDArray[Any]] = ..., + ) -> _ScalarType: ... + @overload + def view( + self, + dtype: DTypeLike, + type: type[NDArray[Any]] = ..., + ) -> Any: ... + + @overload + def getfield( + self, + dtype: _DTypeLike[_ScalarType], + offset: SupportsIndex = ... + ) -> _ScalarType: ... + @overload + def getfield( + self, + dtype: DTypeLike, + offset: SupportsIndex = ... + ) -> Any: ... + + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> Any: ... + + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _IntLike_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> NDArray[_ScalarType]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + def repeat( + self: _ScalarType, + repeats: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + ) -> NDArray[_ScalarType]: ... + + def flatten( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> NDArray[_ScalarType]: ... + + def ravel( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> NDArray[_ScalarType]: ... + + @overload + def reshape( + self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ... + ) -> NDArray[_ScalarType]: ... + @overload + def reshape( + self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ... + ) -> NDArray[_ScalarType]: ... + + def bitwise_count( + self, + out: None | NDArray[Any] = ..., + *, + where: _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: builtins.bool = ..., + ) -> Any: ... + + def squeeze( + self: _ScalarType, axis: None | L[0] | tuple[()] = ... + ) -> _ScalarType: ... + def transpose(self: _ScalarType, axes: None | tuple[()] = ..., /) -> _ScalarType: ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ... + +class number(generic, Generic[_NBit1]): # type: ignore + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + def __neg__(self: _ArraySelf) -> _ArraySelf: ... + def __pos__(self: _ArraySelf) -> _ArraySelf: ... + def __abs__(self: _ArraySelf) -> _ArraySelf: ... + # Ensure that objects annotated as `number` support arithmetic operations + __add__: _NumberOp + __radd__: _NumberOp + __sub__: _NumberOp + __rsub__: _NumberOp + __mul__: _NumberOp + __rmul__: _NumberOp + __floordiv__: _NumberOp + __rfloordiv__: _NumberOp + __pow__: _NumberOp + __rpow__: _NumberOp + __truediv__: _NumberOp + __rtruediv__: _NumberOp + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + +class bool(generic): + def __init__(self, value: object = ..., /) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> builtins.bool: ... 
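+ # Illustrative (added for clarity, not part of the upstream stub):
+ # `np.True_.item() is True` holds, i.e. `item` unwraps to the builtin bool.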
+ def tolist(self) -> builtins.bool: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ...
+ __add__: _BoolOp[np.bool]
+ __radd__: _BoolOp[np.bool]
+ __sub__: _BoolSub
+ __rsub__: _BoolSub
+ __mul__: _BoolOp[np.bool]
+ __rmul__: _BoolOp[np.bool]
+ __floordiv__: _BoolOp[int8]
+ __rfloordiv__: _BoolOp[int8]
+ __pow__: _BoolOp[int8]
+ __rpow__: _BoolOp[int8]
+ __truediv__: _BoolTrueDiv
+ __rtruediv__: _BoolTrueDiv
+ def __invert__(self) -> np.bool: ...
+ __lshift__: _BoolBitOp[int8]
+ __rlshift__: _BoolBitOp[int8]
+ __rshift__: _BoolBitOp[int8]
+ __rrshift__: _BoolBitOp[int8]
+ __and__: _BoolBitOp[np.bool]
+ __rand__: _BoolBitOp[np.bool]
+ __xor__: _BoolBitOp[np.bool]
+ __rxor__: _BoolBitOp[np.bool]
+ __or__: _BoolBitOp[np.bool]
+ __ror__: _BoolBitOp[np.bool]
+ __mod__: _BoolMod
+ __rmod__: _BoolMod
+ __divmod__: _BoolDivMod
+ __rdivmod__: _BoolDivMod
+ __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+
+bool_ = bool
+
+class object_(generic):
+ def __init__(self, value: object = ..., /) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ # The 3 protocols below may or may not raise,
+ # depending on the underlying object
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+
+ if sys.version_info >= (3, 12):
+ def __release_buffer__(self, buffer: memoryview, /) -> None: ...
+
+# The `datetime64` constructor requires an object with the three attributes below,
+# and thus supports datetime duck typing
+class _DatetimeScalar(Protocol):
+ @property
+ def day(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def year(self) -> int: ...
+
+# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int`
+# depending on the unit
+class datetime64(generic):
+ @overload
+ def __init__(
+ self,
+ value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
+ format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
+ ) -> None: ...
+ @overload
+ def __init__(
+ self,
+ value: int,
+ format: _CharLike_co | tuple[_CharLike_co, _IntLike_co],
+ /,
+ ) -> None: ...
+ def __add__(self, other: _TD64Like_co) -> datetime64: ...
+ def __radd__(self, other: _TD64Like_co) -> datetime64: ...
+ @overload
+ def __sub__(self, other: datetime64) -> timedelta64: ...
+ @overload
+ def __sub__(self, other: _TD64Like_co) -> datetime64: ...
+ def __rsub__(self, other: datetime64) -> timedelta64: ...
+ __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+ __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+ __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+ __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+
+_IntValue = SupportsInt | _CharLike_co | SupportsIndex
+_FloatValue = None | _CharLike_co | SupportsFloat | SupportsIndex
+_ComplexValue = (
+ None
+ | _CharLike_co
+ | SupportsFloat
+ | SupportsComplex
+ | SupportsIndex
+ | complex # `complex` is not a subtype of `SupportsComplex`
+)
+
+class integer(number[_NBit1]): # type: ignore
+ @property
+ def numerator(self: _ScalarType) -> _ScalarType: ...
+ @property + def denominator(self) -> L[1]: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + + # NOTE: `__index__` is technically defined in the bottom-most + # sub-classes (`int64`, `uint32`, etc) + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> int: ... + def tolist(self) -> int: ... + def is_integer(self) -> L[True]: ... + def bit_count(self: _ScalarType) -> int: ... + def __index__(self) -> int: ... + __truediv__: _IntTrueDiv[_NBit1] + __rtruediv__: _IntTrueDiv[_NBit1] + def __mod__(self, value: _IntLike_co) -> integer[Any]: ... + def __rmod__(self, value: _IntLike_co) -> integer[Any]: ... + def __invert__(self: _IntType) -> _IntType: ... + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __rlshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __rshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __rrshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __and__(self, other: _IntLike_co) -> integer[Any]: ... + def __rand__(self, other: _IntLike_co) -> integer[Any]: ... + def __or__(self, other: _IntLike_co) -> integer[Any]: ... + def __ror__(self, other: _IntLike_co) -> integer[Any]: ... + def __xor__(self, other: _IntLike_co) -> integer[Any]: ... + def __rxor__(self, other: _IntLike_co) -> integer[Any]: ... + +class signedinteger(integer[_NBit1]): + def __init__(self, value: _IntValue = ..., /) -> None: ... + __add__: _SignedIntOp[_NBit1] + __radd__: _SignedIntOp[_NBit1] + __sub__: _SignedIntOp[_NBit1] + __rsub__: _SignedIntOp[_NBit1] + __mul__: _SignedIntOp[_NBit1] + __rmul__: _SignedIntOp[_NBit1] + __floordiv__: _SignedIntOp[_NBit1] + __rfloordiv__: _SignedIntOp[_NBit1] + __pow__: _SignedIntOp[_NBit1] + __rpow__: _SignedIntOp[_NBit1] + __lshift__: _SignedIntBitOp[_NBit1] + __rlshift__: _SignedIntBitOp[_NBit1] + __rshift__: _SignedIntBitOp[_NBit1] + __rrshift__: _SignedIntBitOp[_NBit1] + __and__: _SignedIntBitOp[_NBit1] + __rand__: _SignedIntBitOp[_NBit1] + __xor__: _SignedIntBitOp[_NBit1] + __rxor__: _SignedIntBitOp[_NBit1] + __or__: _SignedIntBitOp[_NBit1] + __ror__: _SignedIntBitOp[_NBit1] + __mod__: _SignedIntMod[_NBit1] + __rmod__: _SignedIntMod[_NBit1] + __divmod__: _SignedIntDivMod[_NBit1] + __rdivmod__: _SignedIntDivMod[_NBit1] + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +# TODO: `item`/`tolist` returns either `dt.timedelta` or `int` +# depending on the unit +class timedelta64(generic): + def __init__( + self, + value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ..., + format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., + /, + ) -> None: ... + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + def __neg__(self: _ArraySelf) -> _ArraySelf: ... + def __pos__(self: _ArraySelf) -> _ArraySelf: ... 
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ... + def __add__(self, other: _TD64Like_co) -> timedelta64: ... + def __radd__(self, other: _TD64Like_co) -> timedelta64: ... + def __sub__(self, other: _TD64Like_co) -> timedelta64: ... + def __rsub__(self, other: _TD64Like_co) -> timedelta64: ... + def __mul__(self, other: _FloatLike_co) -> timedelta64: ... + def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... + __truediv__: _TD64Div[float64] + __floordiv__: _TD64Div[int64] + def __rtruediv__(self, other: timedelta64) -> float64: ... + def __rfloordiv__(self, other: timedelta64) -> int64: ... + def __mod__(self, other: timedelta64) -> timedelta64: ... + def __rmod__(self, other: timedelta64) -> timedelta64: ... + def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... + __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + +class unsignedinteger(integer[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + def __init__(self, value: _IntValue = ..., /) -> None: ... + __add__: _UnsignedIntOp[_NBit1] + __radd__: _UnsignedIntOp[_NBit1] + __sub__: _UnsignedIntOp[_NBit1] + __rsub__: _UnsignedIntOp[_NBit1] + __mul__: _UnsignedIntOp[_NBit1] + __rmul__: _UnsignedIntOp[_NBit1] + __floordiv__: _UnsignedIntOp[_NBit1] + __rfloordiv__: _UnsignedIntOp[_NBit1] + __pow__: _UnsignedIntOp[_NBit1] + __rpow__: _UnsignedIntOp[_NBit1] + __lshift__: _UnsignedIntBitOp[_NBit1] + __rlshift__: _UnsignedIntBitOp[_NBit1] + __rshift__: _UnsignedIntBitOp[_NBit1] + __rrshift__: _UnsignedIntBitOp[_NBit1] + __and__: _UnsignedIntBitOp[_NBit1] + __rand__: _UnsignedIntBitOp[_NBit1] + __xor__: _UnsignedIntBitOp[_NBit1] + __rxor__: _UnsignedIntBitOp[_NBit1] + __or__: _UnsignedIntBitOp[_NBit1] + __ror__: _UnsignedIntBitOp[_NBit1] + __mod__: _UnsignedIntMod[_NBit1] + __rmod__: _UnsignedIntMod[_NBit1] + __divmod__: _UnsignedIntDivMod[_NBit1] + __rdivmod__: _UnsignedIntDivMod[_NBit1] + +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] + +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = uintp +ulong = unsignedinteger[_NBitLong] +ulonglong = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit1]): # type: ignore + def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... + +_IntType = TypeVar("_IntType", bound=integer[Any]) +_FloatType = TypeVar('_FloatType', bound=floating[Any]) + +class floating(inexact[_NBit1]): + def __init__(self, value: _FloatValue = ..., /) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., + /, + ) -> float: ... + def tolist(self) -> float: ... + def is_integer(self) -> builtins.bool: ... + def hex(self: float64) -> str: ... + @classmethod + def fromhex(cls: type[float64], string: str, /) -> float64: ... + def as_integer_ratio(self) -> tuple[int, int]: ... + def __ceil__(self: float64) -> int: ... + def __floor__(self: float64) -> int: ... + def __trunc__(self: float64) -> int: ... + def __getnewargs__(self: float64) -> tuple[float]: ... + def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... 
+ @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + __add__: _FloatOp[_NBit1] + __radd__: _FloatOp[_NBit1] + __sub__: _FloatOp[_NBit1] + __rsub__: _FloatOp[_NBit1] + __mul__: _FloatOp[_NBit1] + __rmul__: _FloatOp[_NBit1] + __truediv__: _FloatOp[_NBit1] + __rtruediv__: _FloatOp[_NBit1] + __floordiv__: _FloatOp[_NBit1] + __rfloordiv__: _FloatOp[_NBit1] + __pow__: _FloatOp[_NBit1] + __rpow__: _FloatOp[_NBit1] + __mod__: _FloatMod[_NBit1] + __rmod__: _FloatMod[_NBit1] + __divmod__: _FloatDivMod[_NBit1] + __rdivmod__: _FloatDivMod[_NBit1] + +float16 = floating[_16Bit] +float32 = floating[_32Bit] +float64 = floating[_64Bit] + +half = floating[_NBitHalf] +single = floating[_NBitSingle] +double = floating[_NBitDouble] +longdouble = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. +# It is used to clarify why `complex128`s precision is `_64Bit`, the latter +# describing the two 64 bit floats representing its real and imaginary component + +class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): + def __init__(self, value: _ComplexValue = ..., /) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> complex: ... + def tolist(self) -> complex: ... + @property + def real(self) -> floating[_NBit1]: ... # type: ignore[override] + @property + def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] + def __getnewargs__(self: complex128) -> tuple[float, float]: ... + # NOTE: Deprecated + # def __round__(self, ndigits=...): ... + __add__: _ComplexOp[_NBit1] + __radd__: _ComplexOp[_NBit1] + __sub__: _ComplexOp[_NBit1] + __rsub__: _ComplexOp[_NBit1] + __mul__: _ComplexOp[_NBit1] + __rmul__: _ComplexOp[_NBit1] + __truediv__: _ComplexOp[_NBit1] + __rtruediv__: _ComplexOp[_NBit1] + __pow__: _ComplexOp[_NBit1] + __rpow__: _ComplexOp[_NBit1] + +complex64 = complexfloating[_32Bit, _32Bit] +complex128 = complexfloating[_64Bit, _64Bit] + +csingle = complexfloating[_NBitSingle, _NBitSingle] +cdouble = complexfloating[_NBitDouble, _NBitDouble] +clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] + +class flexible(generic): ... # type: ignore + +# TODO: `item`/`tolist` returns either `bytes` or `tuple` +# depending on whether or not it's used as an opaque bytes sequence +# or a structure +class void(flexible): + @overload + def __init__(self, value: _IntLike_co | bytes, /, dtype : None = ...) -> None: ... + @overload + def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + def setfield( + self, val: ArrayLike, dtype: DTypeLike, offset: int = ... + ) -> None: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> void: ... + def __setitem__( + self, + key: str | list[str] | SupportsIndex, + value: ArrayLike, + ) -> None: ... + +class character(flexible): # type: ignore + def __int__(self) -> int: ... + def __float__(self) -> float: ... + +# NOTE: Most `np.bytes_` / `np.str_` methods return their +# builtin `bytes` / `str` counterpart + +class bytes_(character, bytes): + @overload + def __init__(self, value: object = ..., /) -> None: ... + @overload + def __init__( + self, value: str, /, encoding: str = ..., errors: str = ... + ) -> None: ... 
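    # Illustrative (see the NOTE above): methods inherited from the builtin
    # hand back the builtin type, not the NumPy subclass:
    #
    #     >>> type(np.bytes_(b"abc").upper())
    #     <class 'bytes'>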
+ def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> bytes: ... + def tolist(self) -> bytes: ... + +class str_(character, str): + @overload + def __init__(self, value: object = ..., /) -> None: ... + @overload + def __init__( + self, value: bytes, /, encoding: str = ..., errors: str = ... + ) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> str: ... + def tolist(self) -> str: ... + +# +# Constants +# + +e: Final[float] +euler_gamma: Final[float] +inf: Final[float] +nan: Final[float] +pi: Final[float] + +little_endian: Final[builtins.bool] +True_: Final[np.bool] +False_: Final[np.bool] + +newaxis: None + +# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs +@final +class ufunc: + @property + def __name__(self) -> str: ... + @property + def __doc__(self) -> str: ... + __call__: Callable[..., Any] + @property + def nin(self) -> int: ... + @property + def nout(self) -> int: ... + @property + def nargs(self) -> int: ... + @property + def ntypes(self) -> int: ... + @property + def types(self) -> list[str]: ... + # Broad return type because it has to encompass things like + # + # >>> np.logical_and.identity is True + # True + # >>> np.add.identity is 0 + # True + # >>> np.sin.identity is None + # True + # + # and any user-defined ufuncs. + @property + def identity(self) -> Any: ... + # This is None for ufuncs and a string for gufuncs. + @property + def signature(self) -> None | str: ... + # The next four methods will always exist, but they will just + # raise a ValueError ufuncs with that don't accept two input + # arguments and return one output argument. Because of that we + # can't type them very precisely. + reduce: Any + accumulate: Any + reduceat: Any + outer: Any + # Similarly at won't be defined for ufuncs that return multiple + # outputs, so we can't type it very precisely. 
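    # Illustrative values on concrete ufuncs, tying the properties above
    # together:
    #
    #     >>> np.add.nin, np.add.nout, np.add.identity
    #     (2, 1, 0)
    #     >>> np.matmul.signature    # a gufunc, so not None
    #     '(n?,k),(k,m?)->(n?,m?)'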
+ at: Any + +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] +add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] +arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] +bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] +ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] +conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] +cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] +cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] +degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] +divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] +equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] +exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] +exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] +expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] +fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] +float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] +floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] +fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] +fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] +fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] +frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] +gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] +hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] +isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] +isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] +isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] +lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] +less: _UFunc_Nin2_Nout1[L['less'], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] +log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] +log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] +log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] +log: _UFunc_Nin1_Nout1[L['log'], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None] +maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] +minimum: 
_UFunc_Nin2_Nout1[L['minimum'], L[21], None] +mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] +multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] +positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] +power: _UFunc_Nin2_Nout1[L['power'], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] +radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] +remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] +rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] +sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] +signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] +sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] +sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] +spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] +square: _UFunc_Nin1_Nout1[L['square'], L[18], None] +subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] +tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] +tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] +true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None] + +abs = absolute +acos = arccos +acosh = arccosh +asin = arcsin +asinh = arcsinh +atan = arctan +atanh = arctanh +atan2 = arctan2 +concat = concatenate +bitwise_left_shift = left_shift +bitwise_invert = invert +bitwise_right_shift = right_shift +permute_dims = transpose +pow = power + +class _CopyMode(enum.Enum): + ALWAYS: L[True] + IF_NEEDED: L[False] + NEVER: L[2] + +_CallType = TypeVar("_CallType", bound=Callable[..., Any]) + +class errstate: + def __init__( + self, + *, + call: _ErrFunc | _SupportsWrite[str] = ..., + all: None | _ErrKind = ..., + divide: None | _ErrKind = ..., + over: None | _ErrKind = ..., + under: None | _ErrKind = ..., + invalid: None | _ErrKind = ..., + ) -> None: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: None | type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, + /, + ) -> None: ... + def __call__(self, func: _CallType) -> _CallType: ... + +@contextmanager +def _no_nep50_warning() -> Generator[None, None, None]: ... +def _get_promotion_state() -> str: ... +def _set_promotion_state(state: str, /) -> None: ... + +class ndenumerate(Generic[_ScalarType]): + iter: flatiter[NDArray[_ScalarType]] + @overload + def __new__( + cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]], + ) -> ndenumerate[_ScalarType]: ... + @overload + def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ... + @overload + def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... + @overload + def __new__(cls, arr: builtins.bool | _NestedSequence[builtins.bool]) -> ndenumerate[np.bool]: ... + @overload + def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ... + @overload + def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... + @overload + def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... + def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ... + def __iter__(self: _T) -> _T: ... 
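# Illustrative: `ndenumerate` pairs each index tuple with the scalar it
# selects, matching the `__next__` annotation above:
#
#     >>> for idx, val in np.ndenumerate([[1, 2], [3, 4]]):
#     ...     print(idx, val)
#     (0, 0) 1
#     (0, 1) 2
#     (1, 0) 3
#     (1, 1) 4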
+ +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, *shape: SupportsIndex) -> None: ... + def __iter__(self: _T) -> _T: ... + def __next__(self) -> _Shape: ... + +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +@final +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def size(self) -> int: ... + def __next__(self) -> tuple[Any, ...]: ... + def __iter__(self: _T) -> _T: ... + def reset(self) -> None: ... + +@final +class busdaycalendar: + def __new__( + cls, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + ) -> busdaycalendar: ... + @property + def weekmask(self) -> NDArray[np.bool]: ... + @property + def holidays(self) -> NDArray[datetime64]: ... + +class finfo(Generic[_FloatType]): + dtype: dtype[_FloatType] + bits: int + eps: _FloatType + epsneg: _FloatType + iexp: int + machep: int + max: _FloatType + maxexp: int + min: _FloatType + minexp: int + negep: int + nexp: int + nmant: int + precision: int + resolution: _FloatType + smallest_subnormal: _FloatType + @property + def smallest_normal(self) -> _FloatType: ... + @property + def tiny(self) -> _FloatType: ... + @overload + def __new__( + cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]] + ) -> finfo[floating[_NBit1]]: ... + @overload + def __new__( + cls, dtype: complex | float | type[complex] | type[float] + ) -> finfo[float64]: ... + @overload + def __new__( + cls, dtype: str + ) -> finfo[floating[Any]]: ... + +class iinfo(Generic[_IntType]): + dtype: dtype[_IntType] + kind: str + bits: int + key: str + @property + def min(self) -> int: ... + @property + def max(self) -> int: ... + + @overload + def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ... + @overload + def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... + @overload + def __new__(cls, dtype: str) -> iinfo[Any]: ... + +_NDIterFlagsKind = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] + +_NDIterOpFlagsKind = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked" +] + +@final +class nditer: + def __new__( + cls, + op: ArrayLike | Sequence[ArrayLike], + flags: None | Sequence[_NDIterFlagsKind] = ..., + op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + op_axes: None | Sequence[Sequence[SupportsIndex]] = ..., + itershape: None | _ShapeLike = ..., + buffersize: SupportsIndex = ..., + ) -> nditer: ... + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: None | type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, + ) -> None: ... 
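    # Illustrative use of the context-manager protocol stubbed just above:
    #
    #     >>> with np.nditer(np.arange(3)) as it:
    #     ...     for x in it:
    #     ...         print(x)
    #     0
    #     1
    #     2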
+ def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> builtins.bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> tuple[dtype[Any], ...]: ... + @property + def finished(self) -> builtins.bool: ... + @property + def has_delayed_bufalloc(self) -> builtins.bool: ... + @property + def has_index(self) -> builtins.bool: ... + @property + def has_multi_index(self) -> builtins.bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> builtins.bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Any], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Any], ...]: ... + +_MemMapModeKind = L[ + "readonly", "r", + "copyonwrite", "c", + "readwrite", "r+", + "write", "w+", +] + +class memmap(ndarray[_ShapeType, _DType_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: type[uint8] = ..., + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[uint8]]: ... + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: _DTypeLike[_ScalarType], + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[_ScalarType]]: ... + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: DTypeLike, + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[Any]]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeType, _DType_co], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + return_scalar: builtins.bool = ..., + ) -> Any: ... + def flush(self) -> None: ... + +# TODO: Add a mypy plugin for managing functions whose output type is dependent +# on the literal value of some sort of signature (e.g. 
`einsum` and `vectorize`) +class vectorize: + pyfunc: Callable[..., Any] + cache: builtins.bool + signature: None | str + otypes: None | str + excluded: set[int | str] + __doc__: None | str + def __init__( + self, + pyfunc: Callable[..., Any], + otypes: None | str | Iterable[DTypeLike] = ..., + doc: None | str = ..., + excluded: None | Iterable[int | str] = ..., + cache: builtins.bool = ..., + signature: None | str = ..., + ) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + +class poly1d: + @property + def variable(self) -> str: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... + + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: ClassVar[None] # type: ignore + + @overload + def __array__(self, t: None = ..., copy: None | bool = ...) -> NDArray[Any]: ... + @overload + def __array__(self, t: _DType, copy: None | bool = ...) -> ndarray[Any, _DType]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: builtins.bool = ..., + variable: None | str = ..., + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... + def __mul__(self, other: ArrayLike) -> poly1d: ... + def __rmul__(self, other: ArrayLike) -> poly1d: ... + def __add__(self, other: ArrayLike) -> poly1d: ... + def __radd__(self, other: ArrayLike) -> poly1d: ... + def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike) -> poly1d: ... + def __rsub__(self, other: ArrayLike) -> poly1d: ... + def __div__(self, other: ArrayLike) -> poly1d: ... + def __truediv__(self, other: ArrayLike) -> poly1d: ... + def __rdiv__(self, other: ArrayLike) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike) -> poly1d: ... + def __getitem__(self, val: int) -> Any: ... + def __setitem__(self, key: int, val: Any) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., + ) -> poly1d: ... + +class matrix(ndarray[_ShapeType, _DType_co]): + __array_priority__: ClassVar[float] + def __new__( + subtype, + data: ArrayLike, + dtype: DTypeLike = ..., + copy: builtins.bool = ..., + ) -> matrix[Any, Any]: ... + def __array_finalize__(self, obj: object) -> None: ... + + @overload + def __getitem__(self, key: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + )) -> Any: ... + @overload + def __getitem__(self, key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> matrix[Any, _DType_co]: ... 
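    # Illustrative (for `vectorize` above): it broadcasts a Python scalar
    # function over arrays -- a convenience loop, not a performance win:
    #
    #     >>> step = np.vectorize(lambda x: 0 if x < 0 else 1)
    #     >>> step([-1.5, 0.5])
    #     array([0, 1])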
+ @overload + def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ... + + def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + + @overload + def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + @overload + def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + + @overload + def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + @overload + def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + + @overload + def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def any(self, axis: None = ..., out: None = ...) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[np.bool]]: ... + @overload + def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def all(self, axis: None = ..., out: None = ...) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[np.bool]]: ... + @overload + def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... 
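    # Illustrative: unlike `ndarray`, axis reductions on `matrix` stay
    # two-dimensional, which is why the overloads above return `matrix`:
    #
    #     >>> np.matrix([[1, 2], [3, 4]]).sum(axis=1).shape
    #     (2, 1)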
+ @overload + def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + @overload + def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + @overload + def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ... + def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ... # type: ignore[typevar] + def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + + @property + def T(self) -> matrix[Any, _DType_co]: ... + @property + def I(self) -> matrix[Any, Any]: ... + @property + def A(self) -> ndarray[_ShapeType, _DType_co]: ... + @property + def A1(self) -> ndarray[Any, _DType_co]: ... + @property + def H(self) -> matrix[Any, _DType_co]: ... + def getT(self) -> matrix[Any, _DType_co]: ... + def getI(self) -> matrix[Any, Any]: ... + def getA(self) -> ndarray[_ShapeType, _DType_co]: ... + def getA1(self) -> ndarray[Any, _DType_co]: ... + def getH(self) -> matrix[Any, _DType_co]: ... + +_CharType = TypeVar("_CharType", str_, bytes_) +_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) + +# NOTE: Deprecated +# class MachAr: ... + +class _SupportsDLPack(Protocol[_T_contra]): + def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ... + +def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ... diff --git a/phivenv/Lib/site-packages/numpy/_configtool.py b/phivenv/Lib/site-packages/numpy/_configtool.py new file mode 100644 index 0000000000000000000000000000000000000000..1be660d8397a3f6e2de78b2e3b5b19f15af5a505 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/_configtool.py @@ -0,0 +1,39 @@ +import argparse +from pathlib import Path +import sys + +from .version import __version__ +from .lib._utils_impl import get_include + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) + parser.add_argument( + "--cflags", + action="store_true", + help="Compile flag needed when using the NumPy headers.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help=("Print the pkgconfig directory in which `numpy.pc` is stored " + "(useful for setting $PKG_CONFIG_PATH)."), + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.cflags: + print("-I" + get_include()) + if args.pkgconfigdir: + _path = Path(get_include()) / '..' 
/ 'lib' / 'pkgconfig' + print(_path.resolve()) + + +if __name__ == "__main__": + main() diff --git a/phivenv/Lib/site-packages/numpy/_distributor_init.py b/phivenv/Lib/site-packages/numpy/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..5979e488538cd7cb3c9c6a67865c18c623f51d5b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/_distributor_init.py @@ -0,0 +1,15 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of numpy. + +For example, this is a good place to put any BLAS/LAPACK initialization code. + +The numpy standard source distribution will not put code in this file, so you +can safely replace this file with your own version. +""" + +try: + from . import _distributor_init_local +except ImportError: + pass diff --git a/phivenv/Lib/site-packages/numpy/_expired_attrs_2_0.py b/phivenv/Lib/site-packages/numpy/_expired_attrs_2_0.py new file mode 100644 index 0000000000000000000000000000000000000000..e7342ce7f25ce77ab0b71b71501113388e2894b0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/_expired_attrs_2_0.py @@ -0,0 +1,80 @@ +""" +Dict of expired attributes that are discontinued since 2.0 release. +Each item is associated with a migration note. +""" + +__expired_attributes__ = { + "geterrobj": "Use the np.errstate context manager instead.", + "seterrobj": "Use the np.errstate context manager instead.", + "cast": "Use `np.asarray(arr, dtype=dtype)` instead.", + "source": "Use `inspect.getsource` instead.", + "lookfor": "Search NumPy's documentation directly.", + "who": "Use an IDE variable explorer or `locals()` instead.", + "fastCopyAndTranspose": "Use `arr.T.copy()` instead.", + "set_numeric_ops": + "For the general case, use `PyUFunc_ReplaceLoopBySignature`. " + "For ndarray subclasses, define the ``__array_ufunc__`` method " + "and override the relevant ufunc.", + "NINF": "Use `-np.inf` instead.", + "PINF": "Use `np.inf` instead.", + "NZERO": "Use `-0.0` instead.", + "PZERO": "Use `0.0` instead.", + "add_newdoc": + "It's still available as `np.lib.add_newdoc`.", + "add_docstring": + "It's still available as `np.lib.add_docstring`.", + "add_newdoc_ufunc": + "It's an internal function and doesn't have a replacement.", + "compat": "There's no replacement, as Python 2 is no longer supported.", + "safe_eval": "Use `ast.literal_eval` instead.", + "float_": "Use `np.float64` instead.", + "complex_": "Use `np.complex128` instead.", + "longfloat": "Use `np.longdouble` instead.", + "singlecomplex": "Use `np.complex64` instead.", + "cfloat": "Use `np.complex128` instead.", + "longcomplex": "Use `np.clongdouble` instead.", + "clongfloat": "Use `np.clongdouble` instead.", + "string_": "Use `np.bytes_` instead.", + "unicode_": "Use `np.str_` instead.", + "Inf": "Use `np.inf` instead.", + "Infinity": "Use `np.inf` instead.", + "NaN": "Use `np.nan` instead.", + "infty": "Use `np.inf` instead.", + "issctype": "Use `issubclass(rep, np.generic)` instead.", + "maximum_sctype": + "Use a specific dtype instead. 
You should avoid relying " + "on any implicit mechanism and select the largest dtype of " + "a kind explicitly in the code.", + "obj2sctype": "Use `np.dtype(obj).type` instead.", + "sctype2char": "Use `np.dtype(obj).char` instead.", + "sctypes": "Access dtypes explicitly instead.", + "issubsctype": "Use `np.issubdtype` instead.", + "set_string_function": + "Use `np.set_printoptions` instead with a formatter for " + "custom printing of NumPy objects.", + "asfarray": "Use `np.asarray` with a proper dtype instead.", + "issubclass_": "Use `issubclass` builtin instead.", + "tracemalloc_domain": "It's now available from `np.lib`.", + "mat": "Use `np.asmatrix` instead.", + "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.", + "recfromtxt": "Use `np.genfromtxt` instead.", + "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, " + "or use `typing.deprecated`.", + "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " + "directly, or use `typing.deprecated`.", + "disp": "Use your own printing function instead.", + "find_common_type": + "Use `numpy.promote_types` or `numpy.result_type` instead. " + "To achieve semantics for the `scalar_types` argument, use " + "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.", + "round_": "Use `np.round` instead.", + "get_array_wrap": "", + "DataSource": "It's still available as `np.lib.npyio.DataSource`.", + "nbytes": "Use `np.dtype().itemsize` instead.", + "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", + "compare_chararrays": + "It's still available as `np.char.compare_chararrays`.", + "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", +} diff --git a/phivenv/Lib/site-packages/numpy/_globals.py b/phivenv/Lib/site-packages/numpy/_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..f916e0612031b864ecce12c87e5d6ea2977a0515 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/_globals.py @@ -0,0 +1,95 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +import enum + +from ._utils import set_module as _set_module + +__all__ = ['_NoValue', '_CopyMode'] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class _NoValueType: + """Special keyword value. + + The instance of this class may be used as the default value assigned to a + keyword if no other obvious default (e.g., `None`) is suitable, + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. 
For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used. + + """ + __instance = None + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance + + def __repr__(self): + return "<no value>" + + +_NoValue = _NoValueType() + + +@_set_module("numpy") +class _CopyMode(enum.Enum): + """ + An enumeration for the copy modes supported + by numpy.copy() and numpy.array(). The following three modes are supported, + + - ALWAYS: This means that a deep copy of the input + array will always be taken. + - IF_NEEDED: This means that a deep copy of the input + array will be taken only if necessary. + - NEVER: This means that the deep copy will never be taken. + If a copy cannot be avoided then a `ValueError` will be + raised. + + Note that the buffer-protocol could in theory do copies. NumPy currently + assumes an object exporting the buffer protocol will never do this. + """ + + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self): + # For backwards compatibility + if self == _CopyMode.ALWAYS: + return True + + if self == _CopyMode.NEVER: + return False + + raise ValueError(f"{self} is neither True nor False.") diff --git a/phivenv/Lib/site-packages/numpy/_pytesttester.py b/phivenv/Lib/site-packages/numpy/_pytesttester.py new file mode 100644 index 0000000000000000000000000000000000000000..ec24fb9abd56e75e98de8f38498236ad8d4b30f2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/_pytesttester.py @@ -0,0 +1,199 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. The usual +boiler plate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as error. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in development mode with +``spin``, through the standard ``spin test`` invocation or from an inplace +build with ``pytest numpy``. + +This module is imported by every numpy subpackage, so lies at the top level to +simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +import sys +import os + +__all__ = ['PytestTester'] + + +def _show_numpy_info(): + import numpy as np + + print("NumPy version %s" % np.__version__) + info = np.lib._utils_impl._opt_info() + print("NumPy CPU features: ", (info if info else 'nothing enabled')) + + +class PytestTester: + """ + Pytest test runner.
+ + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytests. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing, If 0, report time of all tests, if > 0, + report the time of the slowest `timer` tests. Default is -1. + tests : test or list of tests + Tests to be executed with pytest '--pyargs' + + Returns + ------- + result : bool + Return True on success, false otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ... + 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import pytest + import warnings + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo + + # Filter out annoying import messages. Want these in both develop and + # release mode. 
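        # (Illustrative: for `np.lib.test()` the list assembled below ends
        # up roughly as ["-l", "-q", "-W ignore:...", ..., "--pyargs",
        # "numpy.lib"] before being handed to `pytest.main`.)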
+ pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + "-W ignore:Importing from numpy.matlib is", + ] + + if doctests: + pytest_args += ["--doctest-modules"] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v"*(verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + # not importing at the top level to avoid circular import of module + from numpy.testing import IS_PYPY + if IS_PYPY: + pytest_args += ["-m", "not slow and not slow_pypy"] + else: + pytest_args += ["-m", "not slow"] + + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += ["--durations=%s" % durations] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/phivenv/Lib/site-packages/numpy/_pytesttester.pyi b/phivenv/Lib/site-packages/numpy/_pytesttester.pyi new file mode 100644 index 0000000000000000000000000000000000000000..91c1a876b212b4641c026c0be230d139f1cef8b4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/_pytesttester.pyi @@ -0,0 +1,18 @@ +from collections.abc import Iterable +from typing import Literal as L + +__all__: list[str] + +class PytestTester: + module_name: str + def __init__(self, module_name: str) -> None: ... + def __call__( + self, + label: L["fast", "full"] = ..., + verbose: int = ..., + extra_argv: None | Iterable[str] = ..., + doctests: L[False] = ..., + coverage: bool = ..., + durations: int = ..., + tests: None | Iterable[str] = ..., + ) -> bool: ... 
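# Illustrative: every subpackage exposes an instance of the runner typed
# above, so (with pytest installed) a run looks like:
#
#     >>> import numpy as np
#     >>> np.lib.test(label="fast", verbose=2)  # doctest: +SKIP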
diff --git a/phivenv/Lib/site-packages/numpy/char/__init__.py b/phivenv/Lib/site-packages/numpy/char/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddaafabbba5aafb637d5c2cda2aacf76371d6046 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/char/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.defchararray import __all__, __doc__ +from numpy._core.defchararray import * diff --git a/phivenv/Lib/site-packages/numpy/char/__init__.pyi b/phivenv/Lib/site-packages/numpy/char/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bff13a9955425f0ad8062085bca936947e12bdca --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/char/__init__.pyi @@ -0,0 +1,57 @@ +from numpy._core.defchararray import ( + equal as equal, + not_equal as not_equal, + greater_equal as greater_equal, + less_equal as less_equal, + greater as greater, + less as less, + str_len as str_len, + add as add, + multiply as multiply, + mod as mod, + capitalize as capitalize, + center as center, + count as count, + decode as decode, + encode as encode, + endswith as endswith, + expandtabs as expandtabs, + find as find, + index as index, + isalnum as isalnum, + isalpha as isalpha, + isdigit as isdigit, + islower as islower, + isspace as isspace, + istitle as istitle, + isupper as isupper, + join as join, + ljust as ljust, + lower as lower, + lstrip as lstrip, + partition as partition, + replace as replace, + rfind as rfind, + rindex as rindex, + rjust as rjust, + rpartition as rpartition, + rsplit as rsplit, + rstrip as rstrip, + split as split, + splitlines as splitlines, + startswith as startswith, + strip as strip, + swapcase as swapcase, + title as title, + translate as translate, + upper as upper, + zfill as zfill, + isnumeric as isnumeric, + isdecimal as isdecimal, + array as array, + asarray as asarray, + compare_chararrays as compare_chararrays, + chararray as chararray +) + +__all__: list[str] diff --git a/phivenv/Lib/site-packages/numpy/char/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/char/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6b00e59154111afefd2769a49253c323c109b4c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/char/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/compat/__init__.py b/phivenv/Lib/site-packages/numpy/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0869ce2949c277c82f5e40e335613cffc5e44bbc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/compat/__init__.py @@ -0,0 +1,29 @@ +""" +Compatibility module. + +This module contains duplicated code from Python itself or 3rd party +extensions, which may be included for the following reasons: + + * compatibility + * we may only need a small subset of the copied library/module + +This module is deprecated since 1.26.0 and will be removed in future versions. + +""" + +import warnings + +from .._utils import _inspect +from .._utils._inspect import getargspec, formatargspec +from . 
import py3k +from .py3k import * + +warnings.warn( + "`np.compat`, which was used during the Python 2 to 3 transition," + " is deprecated since 1.26.0, and will be removed", + DeprecationWarning, stacklevel=2 +) + +__all__ = [] +__all__.extend(_inspect.__all__) +__all__.extend(py3k.__all__) diff --git a/phivenv/Lib/site-packages/numpy/compat/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/compat/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63aead271ae166fbd81ce94bc9dadbeb7d9a7ee9 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/compat/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/compat/__pycache__/py3k.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/compat/__pycache__/py3k.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3a6d383f2f184318409af4cec2e14859e240804 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/compat/__pycache__/py3k.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/compat/py3k.py b/phivenv/Lib/site-packages/numpy/compat/py3k.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5e9da8a649ccaafe7b6499998d49dedbd90879 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/compat/py3k.py @@ -0,0 +1,145 @@ +""" +Python 3.X compatibility tools. + +While this file was originally intended for Python 2 -> 3 transition, +it is now used to create a compatibility layer between different +minor versions of Python 3. + +While the active version of numpy may not support a given version of python, we +allow downstream libraries to continue to use these shims for forward +compatibility with numpy while they transition their code to newer versions of +Python. +""" +__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', + 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', + 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', + 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', + 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] + +import sys +import os +from pathlib import Path +import io +try: + import pickle5 as pickle +except ImportError: + import pickle + +long = int +integer_types = (int,) +basestring = str +unicode = str +bytes = bytes + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') + +def asstr(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + +def isfileobj(f): + if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): + return False + try: + # BufferedReader/Writer may raise OSError when + # fetching `fileno()` (e.g. when wrapping BytesIO). + f.fileno() + return True + except OSError: + return False + +def open_latin1(filename, mode='r'): + return open(filename, mode=mode, encoding='iso-8859-1') + +def sixu(s): + return s + +strchar = 'U' + +def getexception(): + return sys.exc_info()[1] + +def asbytes_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asbytes_nested(y) for y in x] + else: + return asbytes(x) + +def asunicode_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asunicode_nested(y) for y in x] + else: + return asunicode(x) + +def is_pathlib_path(obj): + """ + Check whether obj is a `pathlib.Path` object. 
+ + Prefer using ``isinstance(obj, os.PathLike)`` instead of this function. + """ + return isinstance(obj, Path) + +# from Python 3.7 +class contextlib_nullcontext: + """Context manager that does no additional processing. + + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + + .. note:: + Prefer using `contextlib.nullcontext` instead of this context manager. + """ + + def __init__(self, enter_result=None): + self.enter_result = enter_result + + def __enter__(self): + return self.enter_result + + def __exit__(self, *excinfo): + pass + + +def npy_load_module(name, fn, info=None): + """ + Load a module. Uses ``load_module`` which will be deprecated in python + 3.12. An alternative that uses ``exec_module`` is in + numpy.distutils.misc_util.exec_mod_from_location + + .. versionadded:: 1.11.2 + + Parameters + ---------- + name : str + Full module name. + fn : str + Path to module file. + info : tuple, optional + Only here for backward compatibility with Python 2.*. + + Returns + ------- + mod : module + + """ + # Explicitly lazy import this to avoid paying the cost + # of importing importlib at startup + from importlib.machinery import SourceFileLoader + return SourceFileLoader(name, fn).load_module() + + +os_fspath = os.fspath +os_PathLike = os.PathLike diff --git a/phivenv/Lib/site-packages/numpy/compat/tests/__init__.py b/phivenv/Lib/site-packages/numpy/compat/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d910fd42367aeae0fcce79fec7dce984866527f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/conftest.py b/phivenv/Lib/site-packages/numpy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..0d16dff518b763205d2687098b9fe0b5f62e39fc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/conftest.py @@ -0,0 +1,138 @@ +""" +Pytest configuration and fixtures for the Numpy test suite. +""" +import os +import tempfile + +import hypothesis +import pytest +import numpy + +from numpy._core._multiarray_tests import get_fpu_mode + + +_old_fpu_mode = None +_collect_results = {} + +# Use a known and persistent tmpdir for hypothesis' caches, which +# can be automatically cleared by the OS or user. 
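# (Illustrative: on a typical Linux host this resolves to "/tmp/.hypothesis";
# the exact location is platform-dependent via `tempfile.gettempdir()`.)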
+hypothesis.configuration.set_hypothesis_home_dir( + os.path.join(tempfile.gettempdir(), ".hypothesis") +) + +# We register two custom profiles for Numpy - for details see +# https://hypothesis.readthedocs.io/en/latest/settings.html +# The first is designed for our own CI runs; the latter also +# forces determinism and is designed for use via np.test() +hypothesis.settings.register_profile( + name="numpy-profile", deadline=None, print_blob=True, +) +hypothesis.settings.register_profile( + name="np.test() profile", + deadline=None, print_blob=True, database=None, derandomize=True, + suppress_health_check=list(hypothesis.HealthCheck), +) +# Note that the default profile is chosen based on the presence +# of pytest.ini, but can be overridden by passing the +# --hypothesis-profile=NAME argument to pytest. +_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini") +hypothesis.settings.load_profile( + "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile" +) + +# The experimentalAPI is used in _umath_tests +os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1" + +def pytest_configure(config): + config.addinivalue_line("markers", + "valgrind_error: Tests that are known to error under valgrind.") + config.addinivalue_line("markers", + "leaks_references: Tests that are known to leak references.") + config.addinivalue_line("markers", + "slow: Tests that are very slow.") + config.addinivalue_line("markers", + "slow_pypy: Tests that are very slow on pypy.") + + +def pytest_addoption(parser): + parser.addoption("--available-memory", action="store", default=None, + help=("Set amount of memory available for running the " + "test suite. This can result to tests requiring " + "especially large amounts of memory to be skipped. " + "Equivalent to setting environment variable " + "NPY_AVAILABLE_MEM. Default: determined" + "automatically.")) + + +def pytest_sessionstart(session): + available_mem = session.config.getoption('available_memory') + if available_mem is not None: + os.environ['NPY_AVAILABLE_MEM'] = available_mem + + +#FIXME when yield tests are gone. +@pytest.hookimpl() +def pytest_itemcollected(item): + """ + Check FPU precision mode was not changed during test collection. + + The clumsy way we do it here is mainly necessary because numpy + still uses yield tests, which can execute code at test collection + time. + """ + global _old_fpu_mode + + mode = get_fpu_mode() + + if _old_fpu_mode is None: + _old_fpu_mode = mode + elif mode != _old_fpu_mode: + _collect_results[item] = (_old_fpu_mode, mode) + _old_fpu_mode = mode + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU precision mode was not changed during the test. 
+ """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " during the test".format(old_mode, new_mode)) + + collect_result = _collect_results.get(request.node) + if collect_result is not None: + old_mode, new_mode = collect_result + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " when collecting the test".format(old_mode, + new_mode)) + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + +@pytest.fixture(autouse=True) +def env_setup(monkeypatch): + monkeypatch.setenv('PYTHONHASHSEED', '0') + + +@pytest.fixture(params=[True, False]) +def weak_promotion(request): + """ + Fixture to ensure "legacy" promotion state or change it to use the new + weak promotion (plus warning). `old_promotion` should be used as a + parameter in the function. + """ + state = numpy._get_promotion_state() + if request.param: + numpy._set_promotion_state("weak_and_warn") + else: + numpy._set_promotion_state("legacy") + + yield request.param + numpy._set_promotion_state(state) diff --git a/phivenv/Lib/site-packages/numpy/core/__init__.py b/phivenv/Lib/site-packages/numpy/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0f320c019d55285b92b44ddf4678c1aa0c7474 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/__init__.py @@ -0,0 +1,32 @@ +""" +The `numpy.core` submodule exists solely for backward compatibility +purposes. The original `core` was renamed to `_core` and made private. +`numpy.core` will be removed in the future. +""" +from numpy import _core +from ._utils import _raise_warning + + +# We used to use `np.core._ufunc_reconstruct` to unpickle. +# This is unnecessary, but old pickles saved before 1.20 will be using it, +# and there is no reason to break loading them. +def _ufunc_reconstruct(module, name): + # The `fromlist` kwarg is required to ensure that `mod` points to the + # inner-most module rather than the parent package when module name is + # nested. This makes it possible to pickle non-toplevel ufuncs such as + # scipy.special.expit for instance. 
+ mod = __import__(module, fromlist=[name]) + return getattr(mod, name) + + +# force lazy-loading of submodules to ensure a warning is printed + +__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", + "einsumfunc", "fromnumeric", "function_base", "getlimits", + "_internal", "multiarray", "_multiarray_umath", "numeric", + "numerictypes", "overrides", "records", "shape_base", "umath"] + +def __getattr__(attr_name): + attr = getattr(_core, attr_name) + _raise_warning(attr_name) + return attr diff --git a/phivenv/Lib/site-packages/numpy/core/__init__.pyi b/phivenv/Lib/site-packages/numpy/core/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..322b4a05e860e5cd9dd653c03548daecba277852 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/_dtype.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/_dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..245fbe935859cfb7bf5ab75d146c5762144ff027 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/_dtype.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd271cd4a08ab4af50fcd6315c1a958153527800 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/_internal.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/_internal.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6bcc49ce7e1033742d949cd152164570bfbd792 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/_internal.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0684e2cc7417c5aaeff59ecfbc4dc7977686cff Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/_multiarray_umath.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/_utils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9034aab04e0e74fcb05c5ad132d95c2972e8bf8e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/_utils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/arrayprint.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/arrayprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6186ed200501a6381173852b2ca8c6ebbbf0e373 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/arrayprint.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/defchararray.cpython-39.pyc 
b/phivenv/Lib/site-packages/numpy/core/__pycache__/defchararray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..249869ff817372cb0a61963978381fecbbd98b3a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/defchararray.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/einsumfunc.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/einsumfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dadba603e0595883738187943bb30f5f9b2ffd41 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/einsumfunc.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/fromnumeric.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/fromnumeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f4242a722db3c76e0d956e3e2b5b0a69727c323 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/fromnumeric.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/function_base.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/function_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f74d3e9a3bcc8ee9a6f2feac1ee6da24697f9ad8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/function_base.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/getlimits.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/getlimits.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d07dfb457806362395af5cd36f92fb9ed28f136b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/getlimits.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/multiarray.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47d2b63a06e2763ff3d82e995508d6924edbd1b7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/multiarray.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/numeric.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..416d587961f36338be9a57fd111c8bc49eda8a6d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/numeric.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/numerictypes.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/numerictypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5140e35af2b21e6e644797797f389d6bc9f0a791 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/numerictypes.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/overrides.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/overrides.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e15199000a6d0c2e580669f0bdb07c3b0d7d2e6 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/overrides.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/records.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/records.cpython-39.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..f6c0f4134f2cc5e1892db856962dc4ee8c4bdfd8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/records.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/shape_base.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/shape_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc7d76496f44854c5e099f7c541078faf73e766 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/shape_base.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/__pycache__/umath.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/core/__pycache__/umath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..754b05c4104ccb5f281bd5f0feabe7f49d04f458 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/core/__pycache__/umath.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/core/_dtype.py b/phivenv/Lib/site-packages/numpy/core/_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..7228c094163329211e608071e3cf6836f9adf2ca --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/_dtype.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import _dtype + from ._utils import _raise_warning + ret = getattr(_dtype, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core._dtype' has no attribute {attr_name}") + _raise_warning(attr_name, "_dtype") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/_dtype_ctypes.py b/phivenv/Lib/site-packages/numpy/core/_dtype_ctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..e59a996b23abebb862e9256857cf628e989cd1f8 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/_dtype_ctypes.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import _dtype_ctypes + from ._utils import _raise_warning + ret = getattr(_dtype_ctypes, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core._dtype_ctypes' has no attribute {attr_name}") + _raise_warning(attr_name, "_dtype_ctypes") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/_internal.py b/phivenv/Lib/site-packages/numpy/core/_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..3080bfb866a0addeb421563c2be9d7603821ee59 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/_internal.py @@ -0,0 +1,25 @@ +from numpy._core import _internal + +# Build a new array from the information in a pickle. +# Note that the name numpy.core._internal._reconstruct is embedded in +# pickles of ndarrays made with NumPy before release 1.0 +# so don't remove the name here, or you'll +# break backward compatibility. +def _reconstruct(subtype, shape, dtype): + from numpy import ndarray + return ndarray.__new__(subtype, shape, dtype) + + +# Pybind11 (in versions <= 2.11.1) imports _dtype_from_pep3118 from the +# _internal submodule, therefore it must be importable without a warning. 
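# A hedged doctest-style illustration of how the forwarding shims above
# behave: attribute access through the old numpy.core path still works, but
# each access emits a DeprecationWarning.
#
#   >>> import warnings
#   >>> import numpy.core as core
#   >>> with warnings.catch_warnings(record=True) as w:
#   ...     warnings.simplefilter("always")
#   ...     _ = core.umath.sin
#   >>> {x.category.__name__ for x in w}
#   {'DeprecationWarning'}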
+_dtype_from_pep3118 = _internal._dtype_from_pep3118 + +def __getattr__(attr_name): + from numpy._core import _internal + from ._utils import _raise_warning + ret = getattr(_internal, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core._internal' has no attribute {attr_name}") + _raise_warning(attr_name, "_internal") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/_multiarray_umath.py b/phivenv/Lib/site-packages/numpy/core/_multiarray_umath.py new file mode 100644 index 0000000000000000000000000000000000000000..235f66749c52837852c86091caa748e3ad2b3d84 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/_multiarray_umath.py @@ -0,0 +1,55 @@ +from numpy._core import _multiarray_umath +from numpy import ufunc + +for item in _multiarray_umath.__dir__(): + # ufuncs appear in pickles with a path in numpy.core._multiarray_umath + # and so must be importable from this namespace without warning or error + attr = getattr(_multiarray_umath, item) + if isinstance(attr, ufunc): + globals()[item] = attr + + +def __getattr__(attr_name): + from numpy._core import _multiarray_umath + from ._utils import _raise_warning + + if attr_name in {"_ARRAY_API", "_UFUNC_API"}: + from numpy.version import short_version + import textwrap + import traceback + import sys + + msg = textwrap.dedent(f""" + A module that was compiled using NumPy 1.x cannot be run in + NumPy {short_version} as it may crash. To support both 1.x and 2.x + versions of NumPy, modules must be compiled with NumPy 2.0. + Some modules may need to be rebuilt instead, e.g. with 'pybind11>=2.12'. + + If you are a user of the module, the easiest solution will be to + downgrade to 'numpy<2' or try to upgrade the affected module. + We expect that some modules will need time to support NumPy 2. + + """) + tb_msg = "Traceback (most recent call last):" + for line in traceback.format_stack()[:-1]: + if "frozen importlib" in line: + continue + tb_msg += line + + # Also print the message (with traceback). This is because old versions + # of NumPy unfortunately set up the import to replace (and hide) the + # error. The traceback shouldn't be needed, but e.g. pytest plugins + # seem to swallow it and we should be failing anyway... + sys.stderr.write(msg + tb_msg) + raise ImportError(msg) + + ret = getattr(_multiarray_umath, attr_name, None) + if ret is None: + raise AttributeError( + "module 'numpy.core._multiarray_umath' has no attribute " + f"{attr_name}") + _raise_warning(attr_name, "_multiarray_umath") + return ret + + +del _multiarray_umath, ufunc diff --git a/phivenv/Lib/site-packages/numpy/core/_utils.py b/phivenv/Lib/site-packages/numpy/core/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..428985d9ace74a5ccdecbf9fd0a8c3f7981637bd --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/_utils.py @@ -0,0 +1,21 @@ +import warnings + + +def _raise_warning(attr: str, submodule: str = None) -> None: + new_module = "numpy._core" + old_module = "numpy.core" + if submodule is not None: + new_module = f"{new_module}.{submodule}" + old_module = f"{old_module}.{submodule}" + warnings.warn( + f"{old_module} is deprecated and has been renamed to {new_module}. " + "The numpy._core namespace contains private NumPy internals and its " + "use is discouraged, as NumPy internals can change without warning in " + "any release. In practice, most real-world usage of numpy.core is to " + "access functionality in the public NumPy API. If that is the case, " + "use the public NumPy API.
If not, you are using NumPy internals. " + "If you would still like to access an internal attribute, " + f"use {new_module}.{attr}.", + DeprecationWarning, + stacklevel=3 + ) diff --git a/phivenv/Lib/site-packages/numpy/core/arrayprint.py b/phivenv/Lib/site-packages/numpy/core/arrayprint.py new file mode 100644 index 0000000000000000000000000000000000000000..17bcd64befcd9a7fe812817ae729bae07ca5e19e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/arrayprint.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import arrayprint + from ._utils import _raise_warning + ret = getattr(arrayprint, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.arrayprint' has no attribute {attr_name}") + _raise_warning(attr_name, "arrayprint") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/defchararray.py b/phivenv/Lib/site-packages/numpy/core/defchararray.py new file mode 100644 index 0000000000000000000000000000000000000000..941bcb7f55537d9e11669cf4fedf263469c2f22e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/defchararray.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import defchararray + from ._utils import _raise_warning + ret = getattr(defchararray, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.defchararray' has no attribute {attr_name}") + _raise_warning(attr_name, "defchararray") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/einsumfunc.py b/phivenv/Lib/site-packages/numpy/core/einsumfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..58d1c022889912c23642d7656bbb3edb21033f8e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/einsumfunc.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import einsumfunc + from ._utils import _raise_warning + ret = getattr(einsumfunc, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.einsumfunc' has no attribute {attr_name}") + _raise_warning(attr_name, "einsumfunc") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/fromnumeric.py b/phivenv/Lib/site-packages/numpy/core/fromnumeric.py new file mode 100644 index 0000000000000000000000000000000000000000..987a6e26a4bc6e177badf7b848c40ceddc7b073b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/fromnumeric.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import fromnumeric + from ._utils import _raise_warning + ret = getattr(fromnumeric, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.fromnumeric' has no attribute {attr_name}") + _raise_warning(attr_name, "fromnumeric") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/function_base.py b/phivenv/Lib/site-packages/numpy/core/function_base.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7332bc8cd64fdbb97534b7e7f5f9ae7a18b592 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/function_base.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import function_base + from ._utils import _raise_warning + ret = getattr(function_base, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.function_base' has no attribute {attr_name}") + _raise_warning(attr_name, "function_base") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/getlimits.py b/phivenv/Lib/site-packages/numpy/core/getlimits.py new file mode 100644 index 
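# A hedged sketch of the NumPy 1.x ABI guard in _multiarray_umath above:
# touching the C-API handles through the compatibility namespace raises the
# explanatory ImportError (and prints it to stderr) instead of crashing.
#
#   >>> from numpy.core import _multiarray_umath
#   >>> try:
#   ...     _multiarray_umath._ARRAY_API
#   ... except ImportError:
#   ...     print("NumPy 1.x compatibility guard fired")
#   NumPy 1.x compatibility guard fired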
0000000000000000000000000000000000000000..2a0b78d3c4c9a15bb44fbd037e935a00110852aa --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/getlimits.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import getlimits + from ._utils import _raise_warning + ret = getattr(getlimits, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.getlimits' has no attribute {attr_name}") + _raise_warning(attr_name, "getlimits") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/multiarray.py b/phivenv/Lib/site-packages/numpy/core/multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7f4db814defc4c8b7f150d8a41f4e19af7bc1c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/multiarray.py @@ -0,0 +1,24 @@ +from numpy._core import multiarray + +# these must import without warning or error from numpy.core.multiarray to +# support old pickle files +for item in ["_reconstruct", "scalar"]: + globals()[item] = getattr(multiarray, item) + +# Pybind11 (in versions <= 2.11.1) imports _ARRAY_API from the multiarray +# submodule as a part of NumPy initialization, therefore it must be importable +# without a warning. +_ARRAY_API = multiarray._ARRAY_API + +def __getattr__(attr_name): + from numpy._core import multiarray + from ._utils import _raise_warning + ret = getattr(multiarray, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.multiarray' has no attribute {attr_name}") + _raise_warning(attr_name, "multiarray") + return ret + + +del multiarray diff --git a/phivenv/Lib/site-packages/numpy/core/numeric.py b/phivenv/Lib/site-packages/numpy/core/numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..deb3cfc679d226eb4afdae05513fa0a2950d179d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/numeric.py @@ -0,0 +1,11 @@ +def __getattr__(attr_name): + from numpy._core import numeric + from ._utils import _raise_warning + + sentinel = object() + ret = getattr(numeric, attr_name, sentinel) + if ret is sentinel: + raise AttributeError( + f"module 'numpy.core.numeric' has no attribute {attr_name}") + _raise_warning(attr_name, "numeric") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/numerictypes.py b/phivenv/Lib/site-packages/numpy/core/numerictypes.py new file mode 100644 index 0000000000000000000000000000000000000000..2faf7ec66fe812c7e951ebbac15879a48e1743ff --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/numerictypes.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import numerictypes + from ._utils import _raise_warning + ret = getattr(numerictypes, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.numerictypes' has no attribute {attr_name}") + _raise_warning(attr_name, "numerictypes") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/overrides.py b/phivenv/Lib/site-packages/numpy/core/overrides.py new file mode 100644 index 0000000000000000000000000000000000000000..a4143039d68fa370e215c245efdd55ec41633f68 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/overrides.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import overrides + from ._utils import _raise_warning + ret = getattr(overrides, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.overrides' has no attribute {attr_name}") + _raise_warning(attr_name, "overrides") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/records.py 
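# A note on the sentinel used by numeric.py above: unlike the other shims,
# it cannot treat None as "missing", because numpy._core.numeric
# legitimately exposes attributes whose value is None, e.g. newaxis. A
# hedged check:
#
#   >>> import numpy._core.numeric as numeric
#   >>> numeric.newaxis is None
#   True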
b/phivenv/Lib/site-packages/numpy/core/records.py new file mode 100644 index 0000000000000000000000000000000000000000..180f2012a569314debf33d3062a88fc566f6ed8a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/records.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import records + from ._utils import _raise_warning + ret = getattr(records, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.records' has no attribute {attr_name}") + _raise_warning(attr_name, "records") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/shape_base.py b/phivenv/Lib/site-packages/numpy/core/shape_base.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ef30797919b0c9e86fefef6c4bb29c4a974370 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/shape_base.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import shape_base + from ._utils import _raise_warning + ret = getattr(shape_base, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.shape_base' has no attribute {attr_name}") + _raise_warning(attr_name, "shape_base") + return ret diff --git a/phivenv/Lib/site-packages/numpy/core/umath.py b/phivenv/Lib/site-packages/numpy/core/umath.py new file mode 100644 index 0000000000000000000000000000000000000000..af8fff191f5f173cfd70babd5fd511de88125b18 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/core/umath.py @@ -0,0 +1,9 @@ +def __getattr__(attr_name): + from numpy._core import umath + from ._utils import _raise_warning + ret = getattr(umath, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.core.umath' has no attribute {attr_name}") + _raise_warning(attr_name, "umath") + return ret diff --git a/phivenv/Lib/site-packages/numpy/ctypeslib.py b/phivenv/Lib/site-packages/numpy/ctypeslib.py new file mode 100644 index 0000000000000000000000000000000000000000..03b17a07fd8e012e0209dd41eb88b70324e568b1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ctypeslib.py @@ -0,0 +1,545 @@ +""" +============================ +``ctypes`` Utility Functions +============================ + +See Also +-------- +load_library : Load a C library. +ndpointer : Array restype/argtype with verification. +as_ctypes : Create a ctypes array from an ndarray. +as_array : Create an ndarray from a ctypes array. + +References +---------- +.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html + +Examples +-------- +Load the C library: + +>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP + +Our result type, an ndarray that must be of type double, be 1-dimensional +and is C-contiguous in memory: + +>>> array_1d_double = np.ctypeslib.ndpointer( +... dtype=np.double, +... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP + +Our C-function typically takes an array and updates its values +in-place. 
For example:: + + void foo_func(double* x, int length) + { + int i; + for (i = 0; i < length; i++) { + x[i] = i*i; + } + } + +We wrap it using: + +>>> _lib.foo_func.restype = None #doctest: +SKIP +>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP + +Then, we're ready to call ``foo_func``: + +>>> out = np.empty(15, dtype=np.double) +>>> _lib.foo_func(out, len(out)) #doctest: +SKIP + +""" +__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array', + 'as_ctypes_type'] + +import os +from numpy import ( + integer, ndarray, dtype as _dtype, asarray, frombuffer +) +from numpy._core.multiarray import _flagdict, flagsobj + +try: + import ctypes +except ImportError: + ctypes = None + +if ctypes is None: + def _dummy(*args, **kwds): + """ + Dummy object that raises an ImportError if ctypes is not available. + + Raises + ------ + ImportError + If ctypes is not available. + + """ + raise ImportError("ctypes is not available.") + load_library = _dummy + as_ctypes = _dummy + as_array = _dummy + from numpy import intp as c_intp + _ndptr_base = object +else: + import numpy._core._internal as nic + c_intp = nic._getintp_ctype() + del nic + _ndptr_base = ctypes.c_void_p + + # Adapted from Albert Strasheim + def load_library(libname, loader_path): + """ + It is possible to load a library using + + >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP + + But there are cross-platform considerations, such as library file extensions, + plus the fact that Windows will just load the first library it finds with that name. + NumPy supplies the load_library function as a convenience. + + .. versionchanged:: 1.20.0 + Allow libname and loader_path to take any + :term:`python:path-like object`. + + Parameters + ---------- + libname : path-like + Name of the library, which can have 'lib' as a prefix, + but without an extension. + loader_path : path-like + Where the library can be found. + + Returns + ------- + ctypes.cdll[libpath] : library object + A ctypes library object + + Raises + ------ + OSError + If there is no library with the expected extension, or the + library is defective and cannot be loaded. + """ + # Convert path-like objects into strings + libname = os.fsdecode(libname) + loader_path = os.fsdecode(loader_path) + + ext = os.path.splitext(libname)[1] + if not ext: + import sys + import sysconfig + # Try to load library with platform-specific name, otherwise + # default to libname.[so|dll|dylib]. Sometimes, these files are + # built erroneously on non-Linux platforms.
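# A hedged usage sketch for load_library; 'libmystuff' and '.' are
# placeholders, and the call succeeds only if such a shared library exists:
#
#   >>> import numpy as np
#   >>> lib = np.ctypeslib.load_library('libmystuff', '.')  #doctest: +SKIP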
+ base_ext = ".so" + if sys.platform.startswith("darwin"): + base_ext = ".dylib" + elif sys.platform.startswith("win"): + base_ext = ".dll" + libname_ext = [libname + base_ext] + so_ext = sysconfig.get_config_var("EXT_SUFFIX") + if not so_ext == base_ext: + libname_ext.insert(0, libname + so_ext) + else: + libname_ext = [libname] + + loader_path = os.path.abspath(loader_path) + if not os.path.isdir(loader_path): + libdir = os.path.dirname(loader_path) + else: + libdir = loader_path + + for ln in libname_ext: + libpath = os.path.join(libdir, ln) + if os.path.exists(libpath): + try: + return ctypes.cdll[libpath] + except OSError: + ## defective lib file + raise + ## if no successful return in the libname_ext loop: + raise OSError("no file with expected extension") + + +def _num_fromflags(flaglist): + num = 0 + for val in flaglist: + num += _flagdict[val] + return num + +_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', + 'OWNDATA', 'WRITEBACKIFCOPY'] +def _flags_fromnum(num): + res = [] + for key in _flagnames: + value = _flagdict[key] + if (num & value): + res.append(key) + return res + + +class _ndptr(_ndptr_base): + @classmethod + def from_param(cls, obj): + if not isinstance(obj, ndarray): + raise TypeError("argument must be an ndarray") + if cls._dtype_ is not None \ + and obj.dtype != cls._dtype_: + raise TypeError("array must have data type %s" % cls._dtype_) + if cls._ndim_ is not None \ + and obj.ndim != cls._ndim_: + raise TypeError("array must have %d dimension(s)" % cls._ndim_) + if cls._shape_ is not None \ + and obj.shape != cls._shape_: + raise TypeError("array must have shape %s" % str(cls._shape_)) + if cls._flags_ is not None \ + and ((obj.flags.num & cls._flags_) != cls._flags_): + raise TypeError("array must have flags %s" % + _flags_fromnum(cls._flags_)) + return obj.ctypes + + +class _concrete_ndptr(_ndptr): + """ + Like _ndptr, but with `_shape_` and `_dtype_` specified. + + Notably, this means the pointer has enough information to reconstruct + the array, which is not generally true. + """ + def _check_retval_(self): + """ + This method is called when this class is used as the .restype + attribute for a shared-library function, to automatically wrap the + pointer into an array. + """ + return self.contents + + @property + def contents(self): + """ + Get an ndarray viewing the data pointed to by this pointer. + + This mirrors the `contents` attribute of a normal ctypes pointer + """ + full_dtype = _dtype((self._dtype_, self._shape_)) + full_ctype = ctypes.c_char * full_dtype.itemsize + buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents + return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) + + +# Factory for an array-checking class with from_param defined for +# use with ctypes argtypes mechanism +_pointer_type_cache = {} +def ndpointer(dtype=None, ndim=None, shape=None, flags=None): + """ + Array-checking restype/argtypes. + + An ndpointer instance is used to describe an ndarray in restypes + and argtypes specifications. This approach is more flexible than + using, for example, ``POINTER(c_double)``, since several restrictions + can be specified, which are verified upon calling the ctypes function. + These include data type, number of dimensions, shape and flags. If a + given array does not satisfy the specified restrictions, + a ``TypeError`` is raised. + + Parameters + ---------- + dtype : data-type, optional + Array data-type. + ndim : int, optional + Number of array dimensions. 
+ shape : tuple of ints, optional + Array shape. + flags : str or tuple of str, optional + Array flags; may be one or more of: + + - C_CONTIGUOUS / C / CONTIGUOUS + - F_CONTIGUOUS / F / FORTRAN + - OWNDATA / O + - WRITEABLE / W + - ALIGNED / A + - WRITEBACKIFCOPY / X + + Returns + ------- + klass : ndpointer type object + A type object, which is an ``_ndptr`` instance containing + dtype, ndim, shape and flags information. + + Raises + ------ + TypeError + If a given array does not satisfy the specified restrictions. + + Examples + -------- + >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, + ... ndim=1, + ... flags='C_CONTIGUOUS')] + ... #doctest: +SKIP + >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) + ... #doctest: +SKIP + + """ + + # normalize dtype to an Optional[dtype] + if dtype is not None: + dtype = _dtype(dtype) + + # normalize flags to an Optional[int] + num = None + if flags is not None: + if isinstance(flags, str): + flags = flags.split(',') + elif isinstance(flags, (int, integer)): + num = flags + flags = _flags_fromnum(num) + elif isinstance(flags, flagsobj): + num = flags.num + flags = _flags_fromnum(num) + if num is None: + try: + flags = [x.strip().upper() for x in flags] + except Exception as e: + raise TypeError("invalid flags specification") from e + num = _num_fromflags(flags) + + # normalize shape to an Optional[tuple] + if shape is not None: + try: + shape = tuple(shape) + except TypeError: + # single integer -> 1-tuple + shape = (shape,) + + cache_key = (dtype, ndim, shape, num) + + try: + return _pointer_type_cache[cache_key] + except KeyError: + pass + + # produce a name for the new type + if dtype is None: + name = 'any' + elif dtype.names is not None: + name = str(id(dtype)) + else: + name = dtype.str + if ndim is not None: + name += "_%dd" % ndim + if shape is not None: + name += "_"+"x".join(str(x) for x in shape) + if flags is not None: + name += "_"+"_".join(flags) + + if dtype is not None and shape is not None: + base = _concrete_ndptr + else: + base = _ndptr + + klass = type("ndpointer_%s"%name, (base,), + {"_dtype_": dtype, + "_shape_" : shape, + "_ndim_" : ndim, + "_flags_" : num}) + _pointer_type_cache[cache_key] = klass + return klass + + +if ctypes is not None: + def _ctype_ndarray(element_type, shape): + """ Create a ctypes array type of the given element type and shape """ + for dim in shape[::-1]: + element_type = dim * element_type + # prevent the type name from including np.ctypeslib + element_type.__module__ = None + return element_type + + + def _get_scalar_type_map(): + """ + Return a dictionary mapping native-endian scalar dtypes to ctypes types + """ + ct = ctypes + simple_types = [ + ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, + ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, + ct.c_float, ct.c_double, + ct.c_bool, + ] + return {_dtype(ctype): ctype for ctype in simple_types} + + + _scalar_type_map = _get_scalar_type_map() + + + def _ctype_from_dtype_scalar(dtype): + # swapping twice ensures that `=` is promoted to <, >, or | + dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') + dtype_native = dtype.newbyteorder('=') + try: + ctype = _scalar_type_map[dtype_native] + except KeyError as e: + raise NotImplementedError( + "Converting {!r} to a ctypes type".format(dtype) + ) from None + + if dtype_with_endian.byteorder == '>': + ctype = ctype.__ctype_be__ + elif dtype_with_endian.byteorder == '<': + ctype = ctype.__ctype_le__ + + return ctype + + + def _ctype_from_dtype_subarray(dtype):
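# A hedged check of the argument validation performed by the classes that
# ndpointer generates (pure Python, no C library required):
#
#   >>> import numpy as np
#   >>> p = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
#   ...                            flags='C_CONTIGUOUS')
#   >>> _ = p.from_param(np.zeros(3))              # accepted
#   >>> p.from_param(np.zeros(3, dtype=np.int32))  # doctest: +IGNORE_EXCEPTION_DETAIL
#   Traceback (most recent call last):
#       ...
#   TypeError: array must have data type float64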
element_dtype, shape = dtype.subdtype + ctype = _ctype_from_dtype(element_dtype) + return _ctype_ndarray(ctype, shape) + + + def _ctype_from_dtype_structured(dtype): + # extract offsets of each field + field_data = [] + for name in dtype.names: + field_dtype, offset = dtype.fields[name][:2] + field_data.append((offset, name, _ctype_from_dtype(field_dtype))) + + # ctypes doesn't care about field order + field_data = sorted(field_data, key=lambda f: f[0]) + + if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): + # union, if multiple fields all at address 0 + size = 0 + _fields_ = [] + for offset, name, ctype in field_data: + _fields_.append((name, ctype)) + size = max(size, ctypes.sizeof(ctype)) + + # pad to the right size + if dtype.itemsize != size: + _fields_.append(('', ctypes.c_char * dtype.itemsize)) + + # we inserted manual padding, so always `_pack_` + return type('union', (ctypes.Union,), dict( + _fields_=_fields_, + _pack_=1, + __module__=None, + )) + else: + last_offset = 0 + _fields_ = [] + for offset, name, ctype in field_data: + padding = offset - last_offset + if padding < 0: + raise NotImplementedError("Overlapping fields") + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + _fields_.append((name, ctype)) + last_offset = offset + ctypes.sizeof(ctype) + + + padding = dtype.itemsize - last_offset + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + # we inserted manual padding, so always `_pack_` + return type('struct', (ctypes.Structure,), dict( + _fields_=_fields_, + _pack_=1, + __module__=None, + )) + + + def _ctype_from_dtype(dtype): + if dtype.fields is not None: + return _ctype_from_dtype_structured(dtype) + elif dtype.subdtype is not None: + return _ctype_from_dtype_subarray(dtype) + else: + return _ctype_from_dtype_scalar(dtype) + + + def as_ctypes_type(dtype): + r""" + Convert a dtype into a ctypes type. + + Parameters + ---------- + dtype : dtype + The dtype to convert + + Returns + ------- + ctype + A ctype scalar, union, array, or struct + + Raises + ------ + NotImplementedError + If the conversion is not possible + + Notes + ----- + This function does not losslessly round-trip in either direction. + + ``np.dtype(as_ctypes_type(dt))`` will: + + - insert padding fields + - reorder fields to be sorted by offset + - discard field titles + + ``as_ctypes_type(np.dtype(ctype))`` will: + + - discard the class names of `ctypes.Structure`\ s and + `ctypes.Union`\ s + - convert single-element `ctypes.Union`\ s into single-element + `ctypes.Structure`\ s + - insert padding fields + + """ + return _ctype_from_dtype(_dtype(dtype)) + + + def as_array(obj, shape=None): + """ + Create a numpy array from a ctypes array or POINTER. + + The numpy array shares the memory with the ctypes object. + + The shape parameter must be given if converting from a ctypes POINTER. + The shape parameter is ignored if converting from a ctypes array + """ + if isinstance(obj, ctypes._Pointer): + # convert pointers to an array of the desired shape + if shape is None: + raise TypeError( + 'as_array() requires a shape argument when called on a ' + 'pointer') + p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) + obj = ctypes.cast(obj, p_arr_type).contents + + return asarray(obj) + + + def as_ctypes(obj): + """Create and return a ctypes object from a numpy array. 
Actually + anything that exposes the __array_interface__ is accepted.""" + ai = obj.__array_interface__ + if ai["strides"]: + raise TypeError("strided arrays not supported") + if ai["version"] != 3: + raise TypeError("only __array_interface__ version 3 supported") + addr, readonly = ai["data"] + if readonly: + raise TypeError("readonly arrays unsupported") + + # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows + # dtype.itemsize (gh-14214) + ctype_scalar = as_ctypes_type(ai["typestr"]) + result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) + result = result_type.from_address(addr) + result.__keep = obj + return result diff --git a/phivenv/Lib/site-packages/numpy/ctypeslib.pyi b/phivenv/Lib/site-packages/numpy/ctypeslib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a2b52fe3146dd4f105399981b2c01235fadc2f3b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ctypeslib.pyi @@ -0,0 +1,253 @@ +# NOTE: Numpy's mypy plugin is used for importing the correct +# platform-specific `ctypes._SimpleCData[int]` sub-type +from ctypes import c_int64 as _c_intp + +import os +import ctypes +from collections.abc import Iterable, Sequence +from typing import ( + Literal as L, + Any, + TypeVar, + Generic, + overload, + ClassVar, +) + +import numpy as np +from numpy import ( + ndarray, + dtype, + generic, + byte, + short, + intc, + long, + longlong, + intp, + ubyte, + ushort, + uintc, + ulong, + ulonglong, + uintp, + single, + double, + longdouble, + void, +) +from numpy._core._internal import _ctypes +from numpy._core.multiarray import flagsobj +from numpy._typing import ( + # Arrays + NDArray, + _ArrayLike, + + # Shapes + _ShapeLike, + + # DTypes + DTypeLike, + _DTypeLike, + _VoidDTypeLike, + _BoolCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _LongCodes, + _LongLongCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, +) + +# TODO: Add a proper `_Shape` bound once we've got variadic typevars +_DType = TypeVar("_DType", bound=dtype[Any]) +_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any]) +_SCT = TypeVar("_SCT", bound=generic) + +_FlagsKind = L[ + 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', + 'F_CONTIGUOUS', 'FORTRAN', 'F', + 'ALIGNED', 'A', + 'WRITEABLE', 'W', + 'OWNDATA', 'O', + 'WRITEBACKIFCOPY', 'X', +] + +# TODO: Add a shape typevar once we have variadic typevars (PEP 646) +class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]): + # In practice these 4 classvars are defined in the dynamic class + # returned by `ndpointer` + _dtype_: ClassVar[_DTypeOptional] + _shape_: ClassVar[None] + _ndim_: ClassVar[None | int] + _flags_: ClassVar[None | list[_FlagsKind]] + + @overload + @classmethod + def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... + @overload + @classmethod + def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes[Any]: ... + +class _concrete_ndptr(_ndptr[_DType]): + _dtype_: ClassVar[_DType] + _shape_: ClassVar[tuple[int, ...]] + @property + def contents(self) -> ndarray[Any, _DType]: ... + +def load_library( + libname: str | bytes | os.PathLike[str] | os.PathLike[bytes], + loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes], +) -> ctypes.CDLL: ... 
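# A hedged round-trip sketch for the conversion helpers implemented in the
# module above (the exact ctypes aliases chosen are platform-dependent):
#
#   >>> import ctypes
#   >>> import numpy as np
#   >>> dt = np.dtype([('x', np.int32), ('y', np.float64)])
#   >>> ct = np.ctypeslib.as_ctypes_type(dt)
#   >>> issubclass(ct, ctypes.Structure)
#   True
#   >>> buf = (ctypes.c_double * 3)(1.0, 2.0, 3.0)
#   >>> np.ctypeslib.as_array(buf)
#   array([1., 2., 3.])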
+ +__all__: list[str] + +c_intp = _c_intp + +@overload +def ndpointer( + dtype: None = ..., + ndim: int = ..., + shape: None | _ShapeLike = ..., + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> type[_ndptr[None]]: ... +@overload +def ndpointer( + dtype: _DTypeLike[_SCT], + ndim: int = ..., + *, + shape: _ShapeLike, + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> type[_concrete_ndptr[dtype[_SCT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike, + ndim: int = ..., + *, + shape: _ShapeLike, + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> type[_concrete_ndptr[dtype[Any]]]: ... +@overload +def ndpointer( + dtype: _DTypeLike[_SCT], + ndim: int = ..., + shape: None = ..., + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> type[_ndptr[dtype[_SCT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike, + ndim: int = ..., + shape: None = ..., + flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., +) -> type[_ndptr[dtype[Any]]]: ... + +@overload +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... +@overload +def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... +@overload +def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... +@overload +def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... +@overload +def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... +@overload +def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... +@overload +def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... +@overload +def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... +@overload +def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... +@overload +def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... +@overload +def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... +@overload +def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... +@overload +def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... +@overload +def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... +@overload +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... +@overload +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` +@overload +def as_ctypes_type(dtype: str) -> type[Any]: ... + +@overload +def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +@overload +def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ... + +@overload +def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... +@overload +def as_ctypes(obj: byte) -> ctypes.c_byte: ... 
+@overload +def as_ctypes(obj: short) -> ctypes.c_short: ... +@overload +def as_ctypes(obj: intc) -> ctypes.c_int: ... +@overload +def as_ctypes(obj: long) -> ctypes.c_long: ... +@overload +def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... +@overload +def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... +@overload +def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +@overload +def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +@overload +def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... +@overload +def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... +@overload +def as_ctypes(obj: single) -> ctypes.c_float: ... +@overload +def as_ctypes(obj: double) -> ctypes.c_double: ... +@overload +def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +@overload +def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +@overload +def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... +@overload +def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +@overload +def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +@overload +def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +@overload +def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... +@overload +def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... +@overload +def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +@overload +def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +@overload +def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +@overload +def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... +@overload +def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... +@overload +def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +@overload +def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +@overload +def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +@overload +def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` diff --git a/phivenv/Lib/site-packages/numpy/distutils/__init__.py b/phivenv/Lib/site-packages/numpy/distutils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c0b42bea90ffd193239476d0abd3b3645076203 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/__init__.py @@ -0,0 +1,64 @@ +""" +An enhanced distutils, providing support for Fortran compilers, for BLAS, +LAPACK and other common libraries for numerical computing, and more. + +Public submodules are:: + + misc_util + system_info + cpu_info + log + exec_command + +For details, please see the *Packaging* and *NumPy Distutils User Guide* +sections of the NumPy Reference Guide. + +For configuring the preference for and location of libraries like BLAS and +LAPACK, and for setting include paths and similar build options, please see +``site.cfg.example`` in the root of the NumPy repository or sdist. + +""" + +import warnings + +# Must import local ccompiler ASAP in order to get +# customized CCompiler.spawn effective. +from . import ccompiler +from . import unixccompiler + +from .npy_pkg_config import * + +warnings.warn("\n\n" + " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" + " of the deprecation of `distutils` itself. It will be removed for\n" + " Python >= 3.12. 
For older Python versions it will remain present.\n" + " It is recommended to use `setuptools < 60.0` for those Python versions.\n" + " For more details, see:\n" + " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", + DeprecationWarning, stacklevel=2 +) +del warnings + +# If numpy is installed, add distutils.test() +try: + from . import __config__ + # Normally numpy is installed if the above import works, but an interrupted + # in-place build could also have left a __config__.py. In that case the + # next import may still fail, so keep it inside the try block. + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester +except ImportError: + pass + + +def customized_fcompiler(plat=None, compiler=None): + from numpy.distutils.fcompiler import new_fcompiler + c = new_fcompiler(plat=plat, compiler=compiler) + c.customize() + return c + +def customized_ccompiler(plat=None, compiler=None, verbose=1): + c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) + c.customize('') + return c diff --git a/phivenv/Lib/site-packages/numpy/distutils/__init__.pyi b/phivenv/Lib/site-packages/numpy/distutils/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..48c64650efee3596c29b5e86e5079382e8ee0966 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/__init__.pyi @@ -0,0 +1,4 @@ +from typing import Any + +# TODO: remove when the full numpy namespace is defined +def __getattr__(name: str) -> Any: ... diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b7e3ba43f654c30c1138f57eb522ff4fa717c64 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ce2c041063835eed7978599bd6c24972d1c758e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/armccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/armccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63f6a048fc786fb3afcac8955cc1bbbc4595f331 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/armccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/ccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/ccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ae634fd70d60b50557858f94f6e4e6307eee8ea Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/ccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d33bf0afe327da83a926610f3bae84afb664f00 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc differ diff --git 
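# A hedged usage sketch for the customized_ccompiler helper above; running
# it emits the numpy.distutils deprecation warning and requires a working C
# toolchain, hence the skips:
#
#   >>> from numpy.distutils import customized_ccompiler  #doctest: +SKIP
#   >>> cc = customized_ccompiler()                       #doctest: +SKIP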
a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71b8ac4d06138f2ab9ce1e22f912abae168b373f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/core.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/core.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68fa8d05bc2a3822b5366d185e4036754326484c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/core.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7a9971f3ffb95eae2a4d6b9c1b4a482e95e9fdf Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/exec_command.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/exec_command.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9e75293efd6d6f43062f453343465109e0daa47 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/exec_command.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/extension.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/extension.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0483d21ae46e335632de5dcaa4fc34f9367fe27f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/extension.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/from_template.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/from_template.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6df822cabc0268661ea12113042b09354dee799 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/from_template.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/fujitsuccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/fujitsuccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aeeca45bff61fa10391d0aae6400bdada6df05c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/fujitsuccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d11974fedda33623c8d2320095a63c53f659553 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/lib2def.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/lib2def.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4b2904eb7b230191b959cefaff94d19477256a5 Binary files /dev/null and 
b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/lib2def.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/line_endings.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/line_endings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa8d239cd4bffb9976cbc549a647f7a6e404837 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/line_endings.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/log.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/log.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b711a9630446b6399fe698b81cb6003f863f4f2 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/log.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4364675e09bcefd231d7ab5086c222f53143e4a0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/misc_util.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/misc_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a9e57068fb6a7165a5d52de1aa7ca3baa6583bd Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/misc_util.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558c408305ae27e9927a1527b8f231602affb76f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1add14da4ada44c02c9f078dc1c4c9f7d7818092 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6ceaaae0cac5996047de87faba9f2e7ac0d4d7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b56eb9ba03d727f3c55e1a6711abfb52d0058dff Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..97473df83b8571e6d4f7a44c434b6ed7c74b926e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/system_info.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/system_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d02474f581704586042278dca24a7fda3e4fd21 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/system_info.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f257c34b9d66787604c79ea42ac857b1ef272601 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/_shell_utils.py b/phivenv/Lib/site-packages/numpy/distutils/_shell_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bb019f77b734d6221f80f67eb43905d45533d9a1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/_shell_utils.py @@ -0,0 +1,87 @@ +""" +Helper functions for interacting with the shell, and consuming shell-style +parameters provided in config files. +""" +import os +import shlex +import subprocess + +__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] + + +class CommandLineParser: + """ + An object that knows how to split and join command-line arguments. + + It must be true that ``argv == split(join(argv))`` for all ``argv``. + The reverse neednt be true - `join(split(cmd))` may result in the addition + or removal of unnecessary escaping. + """ + @staticmethod + def join(argv): + """ Join a list of arguments into a command line string """ + raise NotImplementedError + + @staticmethod + def split(cmd): + """ Split a command line string into a list of arguments """ + raise NotImplementedError + + +class WindowsParser: + """ + The parsing behavior used by `subprocess.call("string")` on Windows, which + matches the Microsoft C/C++ runtime. + + Note that this is _not_ the behavior of cmd. + """ + @staticmethod + def join(argv): + # note that list2cmdline is specific to the windows syntax + return subprocess.list2cmdline(argv) + + @staticmethod + def split(cmd): + import ctypes # guarded import for systems without ctypes + try: + ctypes.windll + except AttributeError: + raise NotImplementedError + + # Windows has special parsing rules for the executable (no quotes), + # that we do not care about - insert a dummy element + if not cmd: + return [] + cmd = 'dummy ' + cmd + + CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW + CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) + CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) + + nargs = ctypes.c_int() + lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) + args = [lpargs[i] for i in range(nargs.value)] + assert not ctypes.windll.kernel32.LocalFree(lpargs) + + # strip the element we inserted + assert args[0] == "dummy" + return args[1:] + + +class PosixParser: + """ + The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
+ """ + @staticmethod + def join(argv): + return ' '.join(shlex.quote(arg) for arg in argv) + + @staticmethod + def split(cmd): + return shlex.split(cmd, posix=True) + + +if os.name == 'nt': + NativeParser = WindowsParser +elif os.name == 'posix': + NativeParser = PosixParser diff --git a/phivenv/Lib/site-packages/numpy/distutils/armccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/armccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..4a96b03616f5f441c1f0556bb2e1cdb88a95d738 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/armccompiler.py @@ -0,0 +1,26 @@ +from distutils.unixccompiler import UnixCCompiler + +class ArmCCompiler(UnixCCompiler): + + """ + Arm compiler. + """ + + compiler_type = 'arm' + cc_exe = 'armclang' + cxx_exe = 'armclang++' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + cc_compiler = self.cc_exe + cxx_compiler = self.cxx_exe + self.set_executables(compiler=cc_compiler + + ' -O3 -fPIC', + compiler_so=cc_compiler + + ' -O3 -fPIC', + compiler_cxx=cxx_compiler + + ' -O3 -fPIC', + linker_exe=cc_compiler + + ' -lamath', + linker_so=cc_compiler + + ' -lamath -shared') diff --git a/phivenv/Lib/site-packages/numpy/distutils/ccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..43b8b3217ff75192b46d46f2087fc8afe953c28b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/ccompiler.py @@ -0,0 +1,826 @@ +import os +import re +import sys +import platform +import shlex +import time +import subprocess +from copy import copy +from pathlib import Path +from distutils import ccompiler +from distutils.ccompiler import ( + compiler_class, gen_lib_options, get_default_compiler, new_compiler, + CCompiler +) +from distutils.errors import ( + DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, + CompileError, UnknownFileError +) +from distutils.sysconfig import customize_compiler +from distutils.version import LooseVersion + +from numpy.distutils import log +from numpy.distutils.exec_command import ( + filepath_from_subprocess_output, forward_bytes_to_stdout +) +from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ + get_num_build_jobs, \ + _commandline_dep_string, \ + sanitize_cxx_flags + +# globals for parallel build management +import threading + +_job_semaphore = None +_global_lock = threading.Lock() +_processing_files = set() + + +def _needs_build(obj, cc_args, extra_postargs, pp_opts): + """ + Check if an objects needs to be rebuild based on its dependencies + + Parameters + ---------- + obj : str + object file + + Returns + ------- + bool + """ + # defined in unixcompiler.py + dep_file = obj + '.d' + if not os.path.exists(dep_file): + return True + + # dep_file is a makefile containing 'object: dependencies' + # formatted like posix shell (spaces escaped, \ line continuations) + # the last line contains the compiler commandline arguments as some + # projects may compile an extension multiple times with different + # arguments + with open(dep_file) as f: + lines = f.readlines() + + cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) + last_cmdline = lines[-1] + if last_cmdline != cmdline: + return True + + contents = ''.join(lines[:-1]) + deps = [x for x in shlex.split(contents, posix=True) + if x != "\n" and not x.endswith(":")] + + try: + t_obj = os.stat(obj).st_mtime + + # check if any of the dependencies is newer than the object + 
# the dependencies include the source used to create the object + for f in deps: + if os.stat(f).st_mtime > t_obj: + return True + except OSError: + # no object counts as newer (shouldn't happen if dep_file exists) + return True + + return False + + +def replace_method(klass, method_name, func): + # Py3k does not have unbound methods anymore, MethodType does not work + m = lambda self, *args, **kw: func(self, *args, **kw) + setattr(klass, method_name, m) + + +###################################################################### +## Method that subclasses may redefine. But don't call this method, +## it is private to the CCompiler class and may return unexpected +## results if used elsewhere. So, you have been warned. + +def CCompiler_find_executables(self): + """ + Does nothing here, but is called by the get_version method and can be + overridden by subclasses. In particular it is redefined in the `FCompiler` + class where more documentation can be found. + + """ + pass + + +replace_method(CCompiler, 'find_executables', CCompiler_find_executables) + + +# Using customized CCompiler.spawn. +def CCompiler_spawn(self, cmd, display=None, env=None): + """ + Execute a command in a sub-process. + + Parameters + ---------- + cmd : str + The command to execute. + display : str or sequence of str, optional + The text to add to the log file kept by `numpy.distutils`. + If not given, `display` is equal to `cmd`. + env : a dictionary for environment variables, optional + + Returns + ------- + None + + Raises + ------ + DistutilsExecError + If the command failed, i.e. the exit status was not 0. + + """ + env = env if env is not None else dict(os.environ) + if display is None: + display = cmd + if is_sequence(display): + display = ' '.join(list(display)) + log.info(display) + try: + if self.verbose: + subprocess.check_output(cmd, env=env) + else: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError as e: + # OSError doesn't have the same hooks for the exception + # output, but exec_command() historically would use an + # empty string for EnvironmentError (base class for + # OSError) + # o = b'' + # still that would make the end-user lost in translation! + o = f"\n\n{e}\n\n\n" + try: + o = o.encode(sys.stdout.encoding) + except AttributeError: + o = o.encode('utf8') + # status previously used by exec_command() for parent + # of OSError + s = 127 + else: + # use a convenience return here so that any kind of + # caught exception will execute the default code after the + # try / except block, which handles various exceptions + return None + + if is_sequence(cmd): + cmd = ' '.join(list(cmd)) + + if self.verbose: + forward_bytes_to_stdout(o) + + if re.search(b'Too many open files', o): + msg = '\nTry rerunning setup command until build succeeds.' + else: + msg = '' + raise DistutilsExecError('Command "%s" failed with exit status %d%s' % + (cmd, s, msg)) + +replace_method(CCompiler, 'spawn', CCompiler_spawn) + +def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + """ + Return the name of the object files for the given source files. + + Parameters + ---------- + source_filenames : list of str + The list of paths to source files. Paths can be either relative or + absolute, this is handled transparently. + strip_dir : bool, optional + Whether to strip the directory from the returned paths. If True, + the file name prepended by `output_dir` is returned. Default is False.
+ output_dir : str, optional + If given, this path is prepended to the returned paths to the + object files. + + Returns + ------- + obj_names : list of str + The list of paths to the object files corresponding to the source + files in `source_filenames`. + + """ + if output_dir is None: + output_dir = '' + obj_names = [] + for src_name in source_filenames: + base, ext = os.path.splitext(os.path.normpath(src_name)) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / + if base.startswith('..'): + # Resolve starting relative path components, middle ones + # (if any) have been handled by os.path.normpath above. + i = base.rfind('..')+2 + d = base[:i] + d = os.path.basename(os.path.abspath(d)) + base = d + base[i:] + if ext not in self.src_extensions: + raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) + if strip_dir: + base = os.path.basename(base) + obj_name = os.path.join(output_dir, base + self.obj_extension) + obj_names.append(obj_name) + return obj_names + +replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) + +def CCompiler_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """ + Compile one or more source files. + + Please refer to the Python distutils API reference for more details. + + Parameters + ---------- + sources : list of str + A list of filenames + output_dir : str, optional + Path to the output directory. + macros : list of tuples + A list of macro definitions. + include_dirs : list of str, optional + The directories to add to the default include file search path for + this compilation only. + debug : bool, optional + Whether or not to output debug symbols in or alongside the object + file(s). + extra_preargs, extra_postargs : ? + Extra pre- and post-arguments. + depends : list of str, optional + A list of file names that all targets depend on. + + Returns + ------- + objects : list of str + A list of object file names, one per source file `sources`. + + Raises + ------ + CompileError + If compilation fails. 
+ + """ + global _job_semaphore + + jobs = get_num_build_jobs() + + # setup semaphore to not exceed number of compile jobs when parallelized at + # extension level (python >= 3.5) + with _global_lock: + if _job_semaphore is None: + _job_semaphore = threading.Semaphore(jobs) + + if not sources: + return [] + from numpy.distutils.fcompiler import (FCompiler, + FORTRAN_COMMON_FIXED_EXTENSIONS, + has_f90_header) + if isinstance(self, FCompiler): + display = [] + for fc in ['f77', 'f90', 'fix']: + fcomp = getattr(self, 'compiler_'+fc) + if fcomp is None: + continue + display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) + display = '\n'.join(display) + else: + ccomp = self.compiler_so + display = "C compiler: %s\n" % (' '.join(ccomp),) + log.info(display) + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + display = "compile options: '%s'" % (' '.join(cc_args)) + if extra_postargs: + display += "\nextra options: '%s'" % (' '.join(extra_postargs)) + log.info(display) + + def single_compile(args): + obj, (src, ext) = args + if not _needs_build(obj, cc_args, extra_postargs, pp_opts): + return + + # check if we are currently already processing the same object + # happens when using the same source in multiple extensions + while True: + # need explicit lock as there is no atomic check and add with GIL + with _global_lock: + # file not being worked on, start working + if obj not in _processing_files: + _processing_files.add(obj) + break + # wait for the processing to end + time.sleep(0.1) + + try: + # retrieve slot from our #job semaphore and build + with _job_semaphore: + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + finally: + # register being done processing + with _global_lock: + _processing_files.remove(obj) + + + if isinstance(self, FCompiler): + objects_to_build = list(build.keys()) + f77_objects, other_objects = [], [] + for obj in objects: + if obj in objects_to_build: + src, ext = build[obj] + if self.compiler_type=='absoft': + obj = cyg2win32(obj) + src = cyg2win32(src) + if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ + and not has_f90_header(src): + f77_objects.append((obj, (src, ext))) + else: + other_objects.append((obj, (src, ext))) + + # f77 objects can be built in parallel + build_items = f77_objects + # build f90 modules serial, module files are generated during + # compilation and may be used by files later in the list so the + # ordering is important + for o in other_objects: + single_compile(o) + else: + build_items = build.items() + + if len(build) > 1 and jobs > 1: + # build parallel + from concurrent.futures import ThreadPoolExecutor + with ThreadPoolExecutor(jobs) as pool: + res = pool.map(single_compile, build_items) + list(res) # access result to raise errors + else: + # build serial + for o in build_items: + single_compile(o) + + # Return *all* object filenames, not just the ones we just built. + return objects + +replace_method(CCompiler, 'compile', CCompiler_compile) + +def CCompiler_customize_cmd(self, cmd, ignore=()): + """ + Customize compiler using distutils command. + + Parameters + ---------- + cmd : class instance + An instance inheriting from ``distutils.cmd.Command``. + ignore : sequence of str, optional + List of ``distutils.ccompiler.CCompiler`` commands (without ``'set_'``) that should not be + altered. 
Strings that are checked for are: + ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', + 'rpath', 'link_objects')``. + + Returns + ------- + None + + """ + log.info('customize %s using %s' % (self.__class__.__name__, + cmd.__class__.__name__)) + + if ( + hasattr(self, 'compiler') and + 'clang' in self.compiler[0] and + not (platform.machine() == 'arm64' and sys.platform == 'darwin') + ): + # clang defaults to a non-strict floating error point model. + # However, '-ftrapping-math' is not currently supported (2023-04-08) + # for macosx_arm64. + # Since NumPy and most Python libs give warnings for these, override: + self.compiler.append('-ftrapping-math') + self.compiler_so.append('-ftrapping-math') + + def allow(attr): + return getattr(cmd, attr, None) is not None and attr not in ignore + + if allow('include_dirs'): + self.set_include_dirs(cmd.include_dirs) + if allow('define'): + for (name, value) in cmd.define: + self.define_macro(name, value) + if allow('undef'): + for macro in cmd.undef: + self.undefine_macro(macro) + if allow('libraries'): + self.set_libraries(self.libraries + cmd.libraries) + if allow('library_dirs'): + self.set_library_dirs(self.library_dirs + cmd.library_dirs) + if allow('rpath'): + self.set_runtime_library_dirs(cmd.rpath) + if allow('link_objects'): + self.set_link_objects(cmd.link_objects) + +replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) + +def _compiler_to_string(compiler): + props = [] + mx = 0 + keys = list(compiler.executables.keys()) + for key in ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch', + 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: + if key not in keys: + keys.append(key) + for key in keys: + if hasattr(compiler, key): + v = getattr(compiler, key) + mx = max(mx, len(key)) + props.append((key, repr(v))) + fmt = '%-' + repr(mx+1) + 's = %s' + lines = [fmt % prop for prop in props] + return '\n'.join(lines) + +def CCompiler_show_customization(self): + """ + Print the compiler customizations to stdout. + + Parameters + ---------- + None + + Returns + ------- + None + + Notes + ----- + Printing is only done if the distutils log threshold is < 2. + + """ + try: + self.get_version() + except Exception: + pass + if log._global_log.threshold<2: + print('*'*80) + print(self.__class__) + print(_compiler_to_string(self)) + print('*'*80) + +replace_method(CCompiler, 'show_customization', CCompiler_show_customization) + +def CCompiler_customize(self, dist, need_cxx=0): + """ + Do any platform-specific customization of a compiler instance. + + This method calls ``distutils.sysconfig.customize_compiler`` for + platform-specific customization, as well as optionally remove a flag + to suppress spurious warnings in case C++ code is being compiled. + + Parameters + ---------- + dist : object + This parameter is not used for anything. + need_cxx : bool, optional + Whether or not C++ has to be compiled. If so (True), the + ``"-Wstrict-prototypes"`` option is removed to prevent spurious + warnings. Default is False. + + Returns + ------- + None + + Notes + ----- + All the default options used by distutils can be extracted with:: + + from distutils import sysconfig + sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', + 'CCSHARED', 'LDSHARED', 'SO') + + """ + # See FCompiler.customize for suggested usage. 
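# ---------------------------------------------------------------------------
# Editorial sketch, not part of the vendored file: the customize() method
# documented above builds on distutils' customize_compiler(), which copies CC,
# CFLAGS, LDSHARED, etc. from sysconfig into the compiler instance; numpy then
# layers its own fixups on top (e.g. dropping -Wstrict-prototypes for C++
# builds). Assumes a POSIX host with a normally configured CPython.
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

cc = new_compiler()      # platform default, e.g. UnixCCompiler
customize_compiler(cc)   # pull CC/CFLAGS/... from sysconfig
print(cc.compiler_so)    # e.g. ['gcc', '-pthread', '-fPIC', ...]
# ---------------------------------------------------------------------------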
+ log.info('customize %s' % (self.__class__.__name__)) + customize_compiler(self) + if need_cxx: + # In general, distutils uses -Wstrict-prototypes, but this option is + # not valid for C++ code, only for C. Remove it if it's there to + # avoid a spurious warning on every compilation. + try: + self.compiler_so.remove('-Wstrict-prototypes') + except (AttributeError, ValueError): + pass + + if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: + if not self.compiler_cxx: + if self.compiler[0].startswith('gcc'): + a, b = 'gcc', 'g++' + else: + a, b = 'cc', 'c++' + self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + + self.compiler[1:] + else: + if hasattr(self, 'compiler'): + log.warn("#### %s #######" % (self.compiler,)) + if not hasattr(self, 'compiler_cxx'): + log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) + + + # check if compiler supports gcc style automatic dependencies + # run on every extension so skip for known good compilers + if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or + 'g++' in self.compiler[0] or + 'clang' in self.compiler[0]): + self._auto_depends = True + elif os.name == 'posix': + import tempfile + import shutil + tmpdir = tempfile.mkdtemp() + try: + fn = os.path.join(tmpdir, "file.c") + with open(fn, "w") as f: + f.write("int a;\n") + self.compile([fn], output_dir=tmpdir, + extra_preargs=['-MMD', '-MF', fn + '.d']) + self._auto_depends = True + except CompileError: + self._auto_depends = False + finally: + shutil.rmtree(tmpdir) + + return + +replace_method(CCompiler, 'customize', CCompiler_customize) + +def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): + """ + Simple matching of version numbers, for use in CCompiler and FCompiler. + + Parameters + ---------- + pat : str, optional + A regular expression matching version numbers. + Default is ``r'[-.\\d]+'``. + ignore : str, optional + A regular expression matching patterns to skip. + Default is ``''``, in which case nothing is skipped. + start : str, optional + A regular expression matching the start of where to start looking + for version numbers. + Default is ``''``, in which case searching is started at the + beginning of the version string given to `matcher`. + + Returns + ------- + matcher : callable + A function that is appropriate to use as the ``.version_match`` + attribute of a ``distutils.ccompiler.CCompiler`` class. `matcher` takes a single parameter, + a version string. + + """ + def matcher(self, version_string): + # version string may appear in the second line, so getting rid + # of new lines: + version_string = version_string.replace('\n', ' ') + pos = 0 + if start: + m = re.match(start, version_string) + if not m: + return None + pos = m.end() + while True: + m = re.search(pat, version_string[pos:]) + if not m: + return None + if ignore and re.match(ignore, m.group(0)): + pos = m.end() + continue + break + return m.group(0) + return matcher + +def CCompiler_get_version(self, force=False, ok_status=[0]): + """ + Return compiler version, or None if compiler is not available. + + Parameters + ---------- + force : bool, optional + If True, force a new determination of the version, even if the + compiler already has a version attribute. Default is False. + ok_status : list of int, optional + The list of status values returned by the version look-up process + for which a version string is returned. If the status value is not + in `ok_status`, None is returned. Default is ``[0]``. 
+ + Returns + ------- + version : str or None + Version string, in the format of ``distutils.version.LooseVersion``. + + """ + if not force and hasattr(self, 'version'): + return self.version + self.find_executables() + try: + version_cmd = self.version_cmd + except AttributeError: + return None + if not version_cmd or not version_cmd[0]: + return None + try: + matcher = self.version_match + except AttributeError: + try: + pat = self.version_pattern + except AttributeError: + return None + def matcher(version_string): + m = re.match(pat, version_string) + if not m: + return None + version = m.group('version') + return version + + try: + output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + output = exc.output + status = exc.returncode + except OSError: + # match the historical returns for a parent + # exception class caught by exec_command() + status = 127 + output = b'' + else: + # output isn't actually a filepath but we do this + # for now to match previous distutils behavior + output = filepath_from_subprocess_output(output) + status = 0 + + version = None + if status in ok_status: + version = matcher(output) + if version: + version = LooseVersion(version) + self.version = version + return version + +replace_method(CCompiler, 'get_version', CCompiler_get_version) + +def CCompiler_cxx_compiler(self): + """ + Return the C++ compiler. + + Parameters + ---------- + None + + Returns + ------- + cxx : class instance + The C++ compiler, as a ``distutils.ccompiler.CCompiler`` instance. + + """ + if self.compiler_type in ('msvc', 'intelw', 'intelemw'): + return self + + cxx = copy(self) + cxx.compiler_cxx = cxx.compiler_cxx + cxx.compiler_so = [cxx.compiler_cxx[0]] + \ + sanitize_cxx_flags(cxx.compiler_so[1:]) + if (sys.platform.startswith(('aix', 'os400')) and + 'ld_so_aix' in cxx.linker_so[0]): + # AIX needs the ld_so_aix script included with Python + cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ + + cxx.linker_so[2:] + if sys.platform.startswith('os400'): + # This is required by IBM i 7.4 and previous versions for PRId64 in printf() calls. + cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') + # This works around a bug in gcc 10.3, which fails to handle the TLS init.
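# ---------------------------------------------------------------------------
# Editorial sketch, not part of the vendored file: how the
# simple_version_match() factory defined above is used. The returned matcher
# is bound as a compiler's .version_match method, so it takes `self` first
# (unused, hence None here); get_version() feeds it the version-command output.
from numpy.distutils.ccompiler import simple_version_match

matcher = simple_version_match(start='gcc')
print(matcher(None, 'gcc (GCC) 11.4.0'))   # -> '11.4.0'
# ---------------------------------------------------------------------------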
+ cxx.compiler_so.append('-fno-extern-tls-init') + cxx.linker_so.append('-fno-extern-tls-init') + else: + cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] + return cxx + +replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) + +compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', + "Intel C Compiler for 32-bit applications") +compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', + "Intel C Itanium Compiler for Itanium-based applications") +compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', + "Intel C Compiler for 64-bit applications") +compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', + "Intel C Compiler for 32-bit applications on Windows") +compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', + "Intel C Compiler for 64-bit applications on Windows") +compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', + "PathScale Compiler for SiCortex-based applications") +compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', + "Arm C Compiler") +compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', + "Fujitsu C Compiler") + +ccompiler._default_compilers += (('linux.*', 'intel'), + ('linux.*', 'intele'), + ('linux.*', 'intelem'), + ('linux.*', 'pathcc'), + ('nt', 'intelw'), + ('nt', 'intelemw')) + +if sys.platform == 'win32': + compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', + "Mingw32 port of GNU C Compiler for Win32"\ + "(for MSC built Python)") + if mingw32(): + # On windows platforms, we want to default to mingw32 (gcc) + # because msvc can't build blitz stuff. + log.info('Setting mingw32 as default compiler for nt.') + ccompiler._default_compilers = (('nt', 'mingw32'),) \ + + ccompiler._default_compilers + + +_distutils_new_compiler = new_compiler +def new_compiler (plat=None, + compiler=None, + verbose=None, + dry_run=0, + force=0): + # Try first C compilers from numpy.distutils. + if verbose is None: + verbose = log.get_threshold() <= log.INFO + if plat is None: + plat = os.name + try: + if compiler is None: + compiler = get_default_compiler(plat) + (module_name, class_name, long_description) = compiler_class[compiler] + except KeyError: + msg = "don't know how to compile C/C++ code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler" % compiler + raise DistutilsPlatformError(msg) + module_name = "numpy.distutils." 
+ module_name + try: + __import__ (module_name) + except ImportError as e: + msg = str(e) + log.info('%s in numpy.distutils; trying from distutils', + str(msg)) + module_name = module_name[6:] + try: + __import__(module_name) + except ImportError as e: + msg = str(e) + raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ + module_name) + try: + module = sys.modules[module_name] + klass = vars(module)[class_name] + except KeyError: + raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + + "in module '%s'") % (class_name, module_name)) + compiler = klass(None, dry_run, force) + compiler.verbose = verbose + log.debug('new_compiler returns %s' % (klass)) + return compiler + +ccompiler.new_compiler = new_compiler + +_distutils_gen_lib_options = gen_lib_options +def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): + # the version of this function provided by CPython allows the following + # to return lists, which are unpacked automatically: + # - compiler.runtime_library_dir_option + # our version extends the behavior to: + # - compiler.library_dir_option + # - compiler.library_option + # - compiler.find_library_file + r = _distutils_gen_lib_options(compiler, library_dirs, + runtime_library_dirs, libraries) + lib_opts = [] + for i in r: + if is_sequence(i): + lib_opts.extend(list(i)) + else: + lib_opts.append(i) + return lib_opts +ccompiler.gen_lib_options = gen_lib_options + +# Also fix up the various compiler modules, which do +# from distutils.ccompiler import gen_lib_options +# Don't bother with mwerks, as we don't support Classic Mac. +for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: + _m = sys.modules.get('distutils.' + _cc + 'compiler') + if _m is not None: + setattr(_m, 'gen_lib_options', gen_lib_options) + diff --git a/phivenv/Lib/site-packages/numpy/distutils/ccompiler_opt.py b/phivenv/Lib/site-packages/numpy/distutils/ccompiler_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..6b0b2af9d9e2aa98f054c2185b802c6e4aa66f08 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/ccompiler_opt.py @@ -0,0 +1,2668 @@ +"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware +optimization, starting from parsing the command arguments, to managing the +relation between the CPU baseline and dispatch-able features, +also generating the required C headers and ending with compiling +the sources with proper compiler's flags. + +`CCompilerOpt` doesn't provide runtime detection for the CPU features, +instead only focuses on the compiler side, but it creates abstract C headers +that can be used later for the final runtime dispatching process.""" + +import atexit +import inspect +import os +import pprint +import re +import subprocess +import textwrap + +class _Config: + """An abstract class holds all configurable attributes of `CCompilerOpt`, + these class attributes can be used to change the default behavior + of `CCompilerOpt` in order to fit other requirements. + + Attributes + ---------- + conf_nocache : bool + Set True to disable memory and file cache. + Default is False. + + conf_noopt : bool + Set True to forces the optimization to be disabled, + in this case `CCompilerOpt` tends to generate all + expected headers in order to 'not' break the build. + Default is False. + + conf_cache_factors : list + Add extra factors to the primary caching factors. 
The caching factors + are used to determine whether changes have happened that + require discarding the cache and updating it. The primary factors + are the arguments of `CCompilerOpt` and `CCompiler`'s properties (type, flags, etc.). + Default is a list of two items: the time of last modification + of `ccompiler_opt` and the value of the attribute "conf_noopt" + + conf_tmp_path : str + The path of the temporary directory. Default is an auto-created + temporary directory via ``tempfile.mkdtemp()``. + + conf_check_path : str + The path of the testing files. Each added CPU feature must have a + **C** source file that contains at least one intrinsic or instruction + related to this feature, so it can be tested against the compiler. + Default is ``./distutils/checks``. + + conf_target_groups : dict + Extra tokens that can be reached from dispatch-able sources through + the special mark ``@targets``. Default is an empty dictionary. + + **Notes**: + - case-insensitive for tokens and group names + - the '#' sign must stick to the beginning of the group name and is only valid within ``@targets`` + + **Example**: + .. code-block:: console + + $ "@targets #avx_group other_tokens" > group_inside.c + + >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ + "$werror $maxopt avx2 avx512f avx512_skx" + >>> cco = CCompilerOpt(cc_instance) + >>> cco.try_dispatch(["group_inside.c"]) + + conf_c_prefix : str + The prefix of public C definitions. Default is ``"NPY_"``. + + conf_c_prefix_ : str + The prefix of internal C definitions. Default is ``"NPY__"``. + + conf_cc_flags : dict + Nested dictionaries defining several compiler flags + linked to some major functions; the main key + represents the compiler name and sub-keys represent + flag names. The default already covers all supported + **C** compilers. + + Sub-keys explained as follows: + + "native": str or None + used by the argument option `native`, to detect the current + machine's support via the compiler. + "werror": str or None + used to treat warnings as errors when testing CPU features + against the compiler and also for the target policy `$werror` + via dispatch-able sources. + "maxopt": str or None + used for the target policy '$maxopt'; the value should + contain the maximum acceptable optimization for the compiler, + e.g. ``'-O3'`` in gcc + + **Notes**: + * case-sensitive for compiler names and flags + * use a space to separate multiple flags + * any flag will be tested against the compiler and skipped + if it's not applicable. + + conf_min_features : dict + A dictionary defining the CPU features used for the + argument option ``'min'``; the key represents the CPU architecture + name, e.g. ``'x86'``. Default values provide the best effort + across a wide range of user platforms. + + **Note**: case-sensitive for architecture names. + + conf_features : dict + Nested dictionaries used for identifying the CPU features. + The primary key is a feature name or a group name + that gathers several features. Default values cover all + supported features but leave out the major options like "flags"; + these undefined options are filled in by the method `conf_features_partial()`. + The defaults cover almost all CPU features for *X86*, *IBM/Power64* + and *ARM 7/8*. + + Sub-keys explained as follows: + + "implies" : str or list, optional + List of CPU feature names implied by it; + each feature name must be defined within `conf_features`. + Default is None. + + "flags": str or list, optional + List of compiler flags. Default is None.
+ + "detect": str or list, optional + List of CPU feature names that required to be detected + in runtime. By default, its the feature name or features + in "group" if its specified. + + "implies_detect": bool, optional + If True, all "detect" of implied features will be combined. + Default is True. see `feature_detect()`. + + "group": str or list, optional + Same as "implies" but doesn't require the feature name to be + defined within `conf_features`. + + "interest": int, required + a key for sorting CPU features + + "headers": str or list, optional + intrinsics C header file + + "disable": str, optional + force disable feature, the string value should contains the + reason of disabling. + + "autovec": bool or None, optional + True or False to declare that CPU feature can be auto-vectorized + by the compiler. + By default(None), treated as True if the feature contains at + least one applicable flag. see `feature_can_autovec()` + + "extra_checks": str or list, optional + Extra test case names for the CPU feature that need to be tested + against the compiler. + + Each test case must have a C file named ``extra_xxxx.c``, where + ``xxxx`` is the case name in lower case, under 'conf_check_path'. + It should contain at least one intrinsic or function related to the test case. + + If the compiler able to successfully compile the C file then `CCompilerOpt` + will add a C ``#define`` for it into the main dispatch header, e.g. + ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. + + **NOTES**: + * space can be used as separator with options that supports "str or list" + * case-sensitive for all values and feature name must be in upper-case. + * if flags aren't applicable, its will skipped rather than disable the + CPU feature + * the CPU feature will disabled if the compiler fail to compile + the test file + """ + conf_nocache = False + conf_noopt = False + conf_cache_factors = None + conf_tmp_path = None + conf_check_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "checks" + ) + conf_target_groups = {} + conf_c_prefix = 'NPY_' + conf_c_prefix_ = 'NPY__' + conf_cc_flags = dict( + gcc = dict( + # native should always fail on arm and ppc64, + # native usually works only with x86 + native = '-march=native', + opt = '-O3', + werror = '-Werror', + ), + clang = dict( + native = '-march=native', + opt = "-O3", + # One of the following flags needs to be applicable for Clang to + # guarantee the sanity of the testing process, however in certain + # cases `-Werror` gets skipped during the availability test due to + # "unused arguments" warnings. 
+ # see https://github.com/numpy/numpy/issues/19624 + werror = '-Werror=switch -Werror', + ), + icc = dict( + native = '-xHost', + opt = '-O3', + werror = '-Werror', + ), + iccw = dict( + native = '/QxHost', + opt = '/O3', + werror = '/Werror', + ), + msvc = dict( + native = None, + opt = '/O2', + werror = '/WX', + ), + fcc = dict( + native = '-mcpu=a64fx', + opt = None, + werror = None, + ) + ) + conf_min_features = dict( + x86 = "SSE SSE2", + x64 = "SSE SSE2 SSE3", + ppc64 = '', # play it safe + ppc64le = "VSX VSX2", + s390x = '', + armhf = '', # play it safe + aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" + ) + conf_features = dict( + # X86 + SSE = dict( + interest=1, headers="xmmintrin.h", + # enabling SSE without SSE2 is useless also + # it's non-optional for x86_64 + implies="SSE2" + ), + SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), + SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), + SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), + SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"), + POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), + SSE42 = dict(interest=7, implies="POPCNT"), + AVX = dict( + interest=8, implies="SSE42", headers="immintrin.h", + implies_detect=False + ), + XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), + FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), + F16C = dict(interest=11, implies="AVX"), + FMA3 = dict(interest=12, implies="F16C"), + AVX2 = dict(interest=13, implies="F16C"), + AVX512F = dict( + interest=20, implies="FMA3 AVX2", implies_detect=False, + extra_checks="AVX512F_REDUCE" + ), + AVX512CD = dict(interest=21, implies="AVX512F"), + AVX512_KNL = dict( + interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", + detect="AVX512_KNL", implies_detect=False + ), + AVX512_KNM = dict( + interest=41, implies="AVX512_KNL", + group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", + detect="AVX512_KNM", implies_detect=False + ), + AVX512_SKX = dict( + interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", + detect="AVX512_SKX", implies_detect=False, + extra_checks="AVX512BW_MASK AVX512DQ_MASK" + ), + AVX512_CLX = dict( + interest=43, implies="AVX512_SKX", group="AVX512VNNI", + detect="AVX512_CLX" + ), + AVX512_CNL = dict( + interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", + detect="AVX512_CNL", implies_detect=False + ), + AVX512_ICL = dict( + interest=45, implies="AVX512_CLX AVX512_CNL", + group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", + detect="AVX512_ICL", implies_detect=False + ), + AVX512_SPR = dict( + interest=46, implies="AVX512_ICL", group="AVX512FP16", + detect="AVX512_SPR", implies_detect=False + ), + # IBM/Power + ## Power7/ISA 2.06 + VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), + ## Power8/ISA 2.07 + VSX2 = dict(interest=2, implies="VSX", implies_detect=False), + ## Power9/ISA 3.00 + VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, + extra_checks="VSX3_HALF_DOUBLE"), + ## Power10/ISA 3.1 + VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, + extra_checks="VSX4_MMA"), + # IBM/Z + ## VX(z13) support + VX = dict(interest=1, headers="vecintrin.h"), + ## Vector-Enhancements Facility + VXE = dict(interest=2, implies="VX", implies_detect=False), + ## Vector-Enhancements Facility 2 + VXE2 = dict(interest=3, implies="VXE", implies_detect=False), + # ARM + NEON = dict(interest=1, headers="arm_neon.h"), + NEON_FP16 = dict(interest=2, implies="NEON"), + ## FMA + NEON_VFPV4 = 
dict(interest=3, implies="NEON_FP16"), + ## Advanced SIMD + ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), + ## ARMv8.2 half-precision & vector arithm + ASIMDHP = dict(interest=5, implies="ASIMD"), + ## ARMv8.2 dot product + ASIMDDP = dict(interest=6, implies="ASIMD"), + ## ARMv8.2 Single & half-precision Multiply + ASIMDFHM = dict(interest=7, implies="ASIMDHP"), + ) + def conf_features_partial(self): + """Return a dictionary of supported CPU features by the platform, + and accumulate the rest of undefined options in `conf_features`, + the returned dict has same rules and notes in + class attribute `conf_features`, also its override + any options that been set in 'conf_features'. + """ + if self.cc_noopt: + # optimization is disabled + return {} + + on_x86 = self.cc_on_x86 or self.cc_on_x64 + is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc + + if on_x86 and is_unix: return dict( + SSE = dict(flags="-msse"), + SSE2 = dict(flags="-msse2"), + SSE3 = dict(flags="-msse3"), + SSSE3 = dict(flags="-mssse3"), + SSE41 = dict(flags="-msse4.1"), + POPCNT = dict(flags="-mpopcnt"), + SSE42 = dict(flags="-msse4.2"), + AVX = dict(flags="-mavx"), + F16C = dict(flags="-mf16c"), + XOP = dict(flags="-mxop"), + FMA4 = dict(flags="-mfma4"), + FMA3 = dict(flags="-mfma"), + AVX2 = dict(flags="-mavx2"), + AVX512F = dict(flags="-mavx512f -mno-mmx"), + AVX512CD = dict(flags="-mavx512cd"), + AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), + AVX512_KNM = dict( + flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" + ), + AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), + AVX512_CLX = dict(flags="-mavx512vnni"), + AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), + AVX512_ICL = dict( + flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" + ), + AVX512_SPR = dict(flags="-mavx512fp16"), + ) + if on_x86 and self.cc_is_icc: return dict( + SSE = dict(flags="-msse"), + SSE2 = dict(flags="-msse2"), + SSE3 = dict(flags="-msse3"), + SSSE3 = dict(flags="-mssse3"), + SSE41 = dict(flags="-msse4.1"), + POPCNT = {}, + SSE42 = dict(flags="-msse4.2"), + AVX = dict(flags="-mavx"), + F16C = {}, + XOP = dict(disable="Intel Compiler doesn't support it"), + FMA4 = dict(disable="Intel Compiler doesn't support it"), + # Intel Compiler doesn't support AVX2 or FMA3 independently + FMA3 = dict( + implies="F16C AVX2", flags="-march=core-avx2" + ), + AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), + # Intel Compiler doesn't support AVX512F or AVX512CD independently + AVX512F = dict( + implies="AVX2 AVX512CD", flags="-march=common-avx512" + ), + AVX512CD = dict( + implies="AVX2 AVX512F", flags="-march=common-avx512" + ), + AVX512_KNL = dict(flags="-xKNL"), + AVX512_KNM = dict(flags="-xKNM"), + AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), + AVX512_CLX = dict(flags="-xCASCADELAKE"), + AVX512_CNL = dict(flags="-xCANNONLAKE"), + AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), + AVX512_SPR = dict(disable="Not supported yet") + ) + if on_x86 and self.cc_is_iccw: return dict( + SSE = dict(flags="/arch:SSE"), + SSE2 = dict(flags="/arch:SSE2"), + SSE3 = dict(flags="/arch:SSE3"), + SSSE3 = dict(flags="/arch:SSSE3"), + SSE41 = dict(flags="/arch:SSE4.1"), + POPCNT = {}, + SSE42 = dict(flags="/arch:SSE4.2"), + AVX = dict(flags="/arch:AVX"), + F16C = {}, + XOP = dict(disable="Intel Compiler doesn't support it"), + FMA4 = dict(disable="Intel Compiler doesn't support it"), + # Intel Compiler doesn't support FMA3 or AVX2 independently + FMA3 = dict( + implies="F16C AVX2", 
flags="/arch:CORE-AVX2" + ), + AVX2 = dict( + implies="FMA3", flags="/arch:CORE-AVX2" + ), + # Intel Compiler doesn't support AVX512F or AVX512CD independently + AVX512F = dict( + implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" + ), + AVX512CD = dict( + implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" + ), + AVX512_KNL = dict(flags="/Qx:KNL"), + AVX512_KNM = dict(flags="/Qx:KNM"), + AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), + AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), + AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), + AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), + AVX512_SPR = dict(disable="Not supported yet") + ) + if on_x86 and self.cc_is_msvc: return dict( + SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, + SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, + SSE3 = {}, + SSSE3 = {}, + SSE41 = {}, + POPCNT = dict(headers="nmmintrin.h"), + SSE42 = {}, + AVX = dict(flags="/arch:AVX"), + F16C = {}, + XOP = dict(headers="ammintrin.h"), + FMA4 = dict(headers="ammintrin.h"), + # MSVC doesn't support FMA3 or AVX2 independently + FMA3 = dict( + implies="F16C AVX2", flags="/arch:AVX2" + ), + AVX2 = dict( + implies="F16C FMA3", flags="/arch:AVX2" + ), + # MSVC doesn't support AVX512F or AVX512CD independently, + # always generate instructions belong to (VL/VW/DQ) + AVX512F = dict( + implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" + ), + AVX512CD = dict( + implies="AVX512F AVX512_SKX", flags="/arch:AVX512" + ), + AVX512_KNL = dict( + disable="MSVC compiler doesn't support it" + ), + AVX512_KNM = dict( + disable="MSVC compiler doesn't support it" + ), + AVX512_SKX = dict(flags="/arch:AVX512"), + AVX512_CLX = {}, + AVX512_CNL = {}, + AVX512_ICL = {}, + AVX512_SPR= dict( + disable="MSVC compiler doesn't support it" + ) + ) + + on_power = self.cc_on_ppc64le or self.cc_on_ppc64 + if on_power: + partial = dict( + VSX = dict( + implies=("VSX2" if self.cc_on_ppc64le else ""), + flags="-mvsx" + ), + VSX2 = dict( + flags="-mcpu=power8", implies_detect=False + ), + VSX3 = dict( + flags="-mcpu=power9 -mtune=power9", implies_detect=False + ), + VSX4 = dict( + flags="-mcpu=power10 -mtune=power10", implies_detect=False + ) + ) + if self.cc_is_clang: + partial["VSX"]["flags"] = "-maltivec -mvsx" + partial["VSX2"]["flags"] = "-mcpu=power8" + partial["VSX3"]["flags"] = "-mcpu=power9" + partial["VSX4"]["flags"] = "-mcpu=power10" + + return partial + + on_zarch = self.cc_on_s390x + if on_zarch: + partial = dict( + VX = dict( + flags="-march=arch11 -mzvector" + ), + VXE = dict( + flags="-march=arch12", implies_detect=False + ), + VXE2 = dict( + flags="-march=arch13", implies_detect=False + ) + ) + + return partial + + + if self.cc_on_aarch64 and is_unix: return dict( + NEON = dict( + implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True + ), + NEON_FP16 = dict( + implies="NEON NEON_VFPV4 ASIMD", autovec=True + ), + NEON_VFPV4 = dict( + implies="NEON NEON_FP16 ASIMD", autovec=True + ), + ASIMD = dict( + implies="NEON NEON_FP16 NEON_VFPV4", autovec=True + ), + ASIMDHP = dict( + flags="-march=armv8.2-a+fp16" + ), + ASIMDDP = dict( + flags="-march=armv8.2-a+dotprod" + ), + ASIMDFHM = dict( + flags="-march=armv8.2-a+fp16fml" + ), + ) + if self.cc_on_armhf and is_unix: return dict( + NEON = dict( + flags="-mfpu=neon" + ), + NEON_FP16 = dict( + flags="-mfpu=neon-fp16 -mfp16-format=ieee" + ), + NEON_VFPV4 = dict( + flags="-mfpu=neon-vfpv4", + ), + ASIMD = dict( + flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", + ), + ASIMDHP = dict( + flags="-march=armv8.2-a+fp16" + ), + ASIMDDP = dict( + 
flags="-march=armv8.2-a+dotprod", + ), + ASIMDFHM = dict( + flags="-march=armv8.2-a+fp16fml" + ) + ) + # TODO: ARM MSVC + return {} + + def __init__(self): + if self.conf_tmp_path is None: + import shutil + import tempfile + tmp = tempfile.mkdtemp() + def rm_temp(): + try: + shutil.rmtree(tmp) + except OSError: + pass + atexit.register(rm_temp) + self.conf_tmp_path = tmp + + if self.conf_cache_factors is None: + self.conf_cache_factors = [ + os.path.getmtime(__file__), + self.conf_nocache + ] + +class _Distutils: + """A helper class that provides a collection of fundamental methods + implemented in a top of Python and NumPy Distutils. + + The idea behind this class is to gather all methods that it may + need to override in case of reuse 'CCompilerOpt' in environment + different than of what NumPy has. + + Parameters + ---------- + ccompiler : `CCompiler` + The generate instance that returned from `distutils.ccompiler.new_compiler()`. + """ + def __init__(self, ccompiler): + self._ccompiler = ccompiler + + def dist_compile(self, sources, flags, ccompiler=None, **kwargs): + """Wrap CCompiler.compile()""" + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + flags = kwargs.pop("extra_postargs", []) + flags + if not ccompiler: + ccompiler = self._ccompiler + + return ccompiler.compile(sources, extra_postargs=flags, **kwargs) + + def dist_test(self, source, flags, macros=[]): + """Return True if 'CCompiler.compile()' able to compile + a source file with certain flags. + """ + assert(isinstance(source, str)) + from distutils.errors import CompileError + cc = self._ccompiler; + bk_spawn = getattr(cc, 'spawn', None) + if bk_spawn: + cc_type = getattr(self._ccompiler, "compiler_type", "") + if cc_type in ("msvc",): + setattr(cc, 'spawn', self._dist_test_spawn_paths) + else: + setattr(cc, 'spawn', self._dist_test_spawn) + test = False + try: + self.dist_compile( + [source], flags, macros=macros, output_dir=self.conf_tmp_path + ) + test = True + except CompileError as e: + self.dist_log(str(e), stderr=True) + if bk_spawn: + setattr(cc, 'spawn', bk_spawn) + return test + + def dist_info(self): + """ + Return a tuple containing info about (platform, compiler, extra_args), + required by the abstract class '_CCompiler' for discovering the + platform environment. This is also used as a cache factor in order + to detect any changes happening from outside. 
+ """ + if hasattr(self, "_dist_info"): + return self._dist_info + + cc_type = getattr(self._ccompiler, "compiler_type", '') + if cc_type in ("intelem", "intelemw"): + platform = "x86_64" + elif cc_type in ("intel", "intelw", "intele"): + platform = "x86" + else: + from distutils.util import get_platform + platform = get_platform() + + cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) + if not cc_type or cc_type == "unix": + if hasattr(cc_info, "__iter__"): + compiler = cc_info[0] + else: + compiler = str(cc_info) + else: + compiler = cc_type + + if hasattr(cc_info, "__iter__") and len(cc_info) > 1: + extra_args = ' '.join(cc_info[1:]) + else: + extra_args = os.environ.get("CFLAGS", "") + extra_args += os.environ.get("CPPFLAGS", "") + + self._dist_info = (platform, compiler, extra_args) + return self._dist_info + + @staticmethod + def dist_error(*args): + """Raise a compiler error""" + from distutils.errors import CompileError + raise CompileError(_Distutils._dist_str(*args)) + + @staticmethod + def dist_fatal(*args): + """Raise a distutils error""" + from distutils.errors import DistutilsError + raise DistutilsError(_Distutils._dist_str(*args)) + + @staticmethod + def dist_log(*args, stderr=False): + """Print a console message""" + from numpy.distutils import log + out = _Distutils._dist_str(*args) + if stderr: + log.warn(out) + else: + log.info(out) + + @staticmethod + def dist_load_module(name, path): + """Load a module from file, required by the abstract class '_Cache'.""" + from .misc_util import exec_mod_from_location + try: + return exec_mod_from_location(name, path) + except Exception as e: + _Distutils.dist_log(e, stderr=True) + return None + + @staticmethod + def _dist_str(*args): + """Return a string to print by log and errors.""" + def to_str(arg): + if not isinstance(arg, str) and hasattr(arg, '__iter__'): + ret = [] + for a in arg: + ret.append(to_str(a)) + return '('+ ' '.join(ret) + ')' + return str(arg) + + stack = inspect.stack()[2] + start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) + out = ' '.join([ + to_str(a) + for a in (*args,) + ]) + return start + out + + def _dist_test_spawn_paths(self, cmd, display=None): + """ + Fix msvc SDK ENV path same as distutils do + without it we get c1: fatal error C1356: unable to find mspdbcore.dll + """ + if not hasattr(self._ccompiler, "_paths"): + self._dist_test_spawn(cmd) + return + old_path = os.getenv("path") + try: + os.environ["path"] = self._ccompiler._paths + self._dist_test_spawn(cmd) + finally: + os.environ["path"] = old_path + + _dist_warn_regex = re.compile( + # intel and msvc compilers don't raise + # fatal errors when flags are wrong or unsupported + ".*(" + "warning D9002|" # msvc, it should be work with any language. 
+ "invalid argument for option" # intel + ").*" + ) + @staticmethod + def _dist_test_spawn(cmd, display=None): + try: + o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, + text=True) + if o and re.match(_Distutils._dist_warn_regex, o): + _Distutils.dist_error( + "Flags in command", cmd ,"aren't supported by the compiler" + ", output -> \n%s" % o + ) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError as e: + o = e + s = 127 + else: + return None + _Distutils.dist_error( + "Command", cmd, "failed with exit status %d output -> \n%s" % ( + s, o + )) + +_share_cache = {} +class _Cache: + """An abstract class handles caching functionality, provides two + levels of caching, in-memory by share instances attributes among + each other and by store attributes into files. + + **Note**: + any attributes that start with ``_`` or ``conf_`` will be ignored. + + Parameters + ---------- + cache_path : str or None + The path of cache file, if None then cache in file will disabled. + + *factors : + The caching factors that need to utilize next to `conf_cache_factors`. + + Attributes + ---------- + cache_private : set + Hold the attributes that need be skipped from "in-memory cache". + + cache_infile : bool + Utilized during initializing this class, to determine if the cache was able + to loaded from the specified cache path in 'cache_path'. + """ + + # skip attributes from cache + _cache_ignore = re.compile("^(_|conf_)") + + def __init__(self, cache_path=None, *factors): + self.cache_me = {} + self.cache_private = set() + self.cache_infile = False + self._cache_path = None + + if self.conf_nocache: + self.dist_log("cache is disabled by `Config`") + return + + self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) + self._cache_path = cache_path + if cache_path: + if os.path.exists(cache_path): + self.dist_log("load cache from file ->", cache_path) + cache_mod = self.dist_load_module("cache", cache_path) + if not cache_mod: + self.dist_log( + "unable to load the cache file as a module", + stderr=True + ) + elif not hasattr(cache_mod, "hash") or \ + not hasattr(cache_mod, "data"): + self.dist_log("invalid cache file", stderr=True) + elif self._cache_hash == cache_mod.hash: + self.dist_log("hit the file cache") + for attr, val in cache_mod.data.items(): + setattr(self, attr, val) + self.cache_infile = True + else: + self.dist_log("miss the file cache") + + if not self.cache_infile: + other_cache = _share_cache.get(self._cache_hash) + if other_cache: + self.dist_log("hit the memory cache") + for attr, val in other_cache.__dict__.items(): + if attr in other_cache.cache_private or \ + re.match(self._cache_ignore, attr): + continue + setattr(self, attr, val) + + _share_cache[self._cache_hash] = self + atexit.register(self.cache_flush) + + def __del__(self): + for h, o in _share_cache.items(): + if o == self: + _share_cache.pop(h) + break + + def cache_flush(self): + """ + Force update the cache. 
+ """ + if not self._cache_path: + return + # TODO: don't write if the cache doesn't change + self.dist_log("write cache to path ->", self._cache_path) + cdict = self.__dict__.copy() + for attr in self.__dict__.keys(): + if re.match(self._cache_ignore, attr): + cdict.pop(attr) + + d = os.path.dirname(self._cache_path) + if not os.path.exists(d): + os.makedirs(d) + + repr_dict = pprint.pformat(cdict, compact=True) + with open(self._cache_path, "w") as f: + f.write(textwrap.dedent("""\ + # AUTOGENERATED DON'T EDIT + # Please make changes to the code generator \ + (distutils/ccompiler_opt.py) + hash = {} + data = \\ + """).format(self._cache_hash)) + f.write(repr_dict) + + def cache_hash(self, *factors): + # is there a built-in non-crypto hash? + # sdbm + chash = 0 + for f in factors: + for char in str(f): + chash = ord(char) + (chash << 6) + (chash << 16) - chash + chash &= 0xFFFFFFFF + return chash + + @staticmethod + def me(cb): + """ + A static method that can be treated as a decorator to + dynamically cache certain methods. + """ + def cache_wrap_me(self, *args, **kwargs): + # good for normal args + cache_key = str(( + cb.__name__, *args, *kwargs.keys(), *kwargs.values() + )) + if cache_key in self.cache_me: + return self.cache_me[cache_key] + ccb = cb(self, *args, **kwargs) + self.cache_me[cache_key] = ccb + return ccb + return cache_wrap_me + +class _CCompiler: + """A helper class for `CCompilerOpt` containing all utilities that + related to the fundamental compiler's functions. + + Attributes + ---------- + cc_on_x86 : bool + True when the target architecture is 32-bit x86 + cc_on_x64 : bool + True when the target architecture is 64-bit x86 + cc_on_ppc64 : bool + True when the target architecture is 64-bit big-endian powerpc + cc_on_ppc64le : bool + True when the target architecture is 64-bit litle-endian powerpc + cc_on_s390x : bool + True when the target architecture is IBM/ZARCH on linux + cc_on_armhf : bool + True when the target architecture is 32-bit ARMv7+ + cc_on_aarch64 : bool + True when the target architecture is 64-bit Armv8-a+ + cc_on_noarch : bool + True when the target architecture is unknown or not supported + cc_is_gcc : bool + True if the compiler is GNU or + if the compiler is unknown + cc_is_clang : bool + True if the compiler is Clang + cc_is_icc : bool + True if the compiler is Intel compiler (unix like) + cc_is_iccw : bool + True if the compiler is Intel compiler (msvc like) + cc_is_nocc : bool + True if the compiler isn't supported directly, + Note: that cause a fail-back to gcc + cc_has_debug : bool + True if the compiler has debug flags + cc_has_native : bool + True if the compiler has native flags + cc_noopt : bool + True if the compiler has definition 'DISABLE_OPT*', + or 'cc_on_noarch' is True + cc_march : str + The target architecture name, or "unknown" if + the architecture isn't supported + cc_name : str + The compiler name, or "unknown" if the compiler isn't supported + cc_flags : dict + Dictionary containing the initialized flags of `_Config.conf_cc_flags` + """ + def __init__(self): + if hasattr(self, "cc_is_cached"): + return + # attr regex compiler-expression + detect_arch = ( + ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), + ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), + ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", + "defined(__powerpc64__) && " + "defined(__LITTLE_ENDIAN__)"), + ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", + "defined(__powerpc64__) && " + "defined(__BIG_ENDIAN__)"), + ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), 
+ ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " + "defined(__ARM_ARCH_7A__)"), + ("cc_on_s390x", ".*s390x.*", ""), + # undefined platform + ("cc_on_noarch", "", ""), + ) + detect_compiler = ( + ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), + ("cc_is_clang", ".*clang.*", ""), + # intel msvc like + ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), + ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like + ("cc_is_msvc", ".*msvc.*", ""), + ("cc_is_fcc", ".*fcc.*", ""), + # undefined compiler will be treat it as gcc + ("cc_is_nocc", "", ""), + ) + detect_args = ( + ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), + ("cc_has_native", + ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), + # in case if the class run with -DNPY_DISABLE_OPTIMIZATION + ("cc_noopt", ".*DISABLE_OPT.*", ""), + ) + + dist_info = self.dist_info() + platform, compiler_info, extra_args = dist_info + # set False to all attrs + for section in (detect_arch, detect_compiler, detect_args): + for attr, rgex, cexpr in section: + setattr(self, attr, False) + + for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): + for attr, rgex, cexpr in detect: + if rgex and not re.match(rgex, searchin, re.IGNORECASE): + continue + if cexpr and not self.cc_test_cexpr(cexpr): + continue + setattr(self, attr, True) + break + + for attr, rgex, cexpr in detect_args: + if rgex and not re.match(rgex, extra_args, re.IGNORECASE): + continue + if cexpr and not self.cc_test_cexpr(cexpr): + continue + setattr(self, attr, True) + + if self.cc_on_noarch: + self.dist_log( + "unable to detect CPU architecture which lead to disable the optimization. " + f"check dist_info:<<\n{dist_info}\n>>", + stderr=True + ) + self.cc_noopt = True + + if self.conf_noopt: + self.dist_log("Optimization is disabled by the Config", stderr=True) + self.cc_noopt = True + + if self.cc_is_nocc: + """ + mingw can be treated as a gcc, and also xlc even if it based on clang, + but still has the same gcc optimization flags. + """ + self.dist_log( + "unable to detect compiler type which leads to treating it as GCC. " + "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." + f"check dist_info:<<\n{dist_info}\n>>", + stderr=True + ) + self.cc_is_gcc = True + + self.cc_march = "unknown" + for arch in ("x86", "x64", "ppc64", "ppc64le", + "armhf", "aarch64", "s390x"): + if getattr(self, "cc_on_" + arch): + self.cc_march = arch + break + + self.cc_name = "unknown" + for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): + if getattr(self, "cc_is_" + name): + self.cc_name = name + break + + self.cc_flags = {} + compiler_flags = self.conf_cc_flags.get(self.cc_name) + if compiler_flags is None: + self.dist_fatal( + "undefined flag for compiler '%s', " + "leave an empty dict instead" % self.cc_name + ) + for name, flags in compiler_flags.items(): + self.cc_flags[name] = nflags = [] + if flags: + assert(isinstance(flags, str)) + flags = flags.split() + for f in flags: + if self.cc_test_flags([f]): + nflags.append(f) + + self.cc_is_cached = True + + @_Cache.me + def cc_test_flags(self, flags): + """ + Returns True if the compiler supports 'flags'. 
+ """ + assert(isinstance(flags, list)) + self.dist_log("testing flags", flags) + test_path = os.path.join(self.conf_check_path, "test_flags.c") + test = self.dist_test(test_path, flags) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + @_Cache.me + def cc_test_cexpr(self, cexpr, flags=[]): + """ + Same as the above but supports compile-time expressions. + """ + self.dist_log("testing compiler expression", cexpr) + test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") + with open(test_path, "w") as fd: + fd.write(textwrap.dedent(f"""\ + #if !({cexpr}) + #error "unsupported expression" + #endif + int dummy; + """)) + test = self.dist_test(test_path, flags) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + def cc_normalize_flags(self, flags): + """ + Remove the conflicts that caused due gathering implied features flags. + + Parameters + ---------- + 'flags' list, compiler flags + flags should be sorted from the lowest to the highest interest. + + Returns + ------- + list, filtered from any conflicts. + + Examples + -------- + >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) + ['armv8.2-a+fp16+dotprod'] + + >>> self.cc_normalize_flags( + ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] + ) + ['-march=core-avx2'] + """ + assert(isinstance(flags, list)) + if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: + return self._cc_normalize_unix(flags) + + if self.cc_is_msvc or self.cc_is_iccw: + return self._cc_normalize_win(flags) + return flags + + _cc_normalize_unix_mrgx = re.compile( + # 1- to check the highest of + r"^(-mcpu=|-march=|-x[A-Z0-9\-])" + ) + _cc_normalize_unix_frgx = re.compile( + # 2- to remove any flags starts with + # -march, -mcpu, -x(INTEL) and '-m' without '=' + r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" + # exclude: + r"(?:-mzvector)" + ) + _cc_normalize_unix_krgx = re.compile( + # 3- keep only the highest of + r"^(-mfpu|-mtune)" + ) + _cc_normalize_arch_ver = re.compile( + r"[0-9.]" + ) + def _cc_normalize_unix(self, flags): + def ver_flags(f): + # arch ver subflag + # -march=armv8.2-a+fp16fml + tokens = f.split('+') + ver = float('0' + ''.join( + re.findall(self._cc_normalize_arch_ver, tokens[0]) + )) + return ver, tokens[0], tokens[1:] + + if len(flags) <= 1: + return flags + # get the highest matched flag + for i, cur_flag in enumerate(reversed(flags)): + if not re.match(self._cc_normalize_unix_mrgx, cur_flag): + continue + lower_flags = flags[:-(i+1)] + upper_flags = flags[-i:] + filtered = list(filter( + self._cc_normalize_unix_frgx.search, lower_flags + )) + # gather subflags + ver, arch, subflags = ver_flags(cur_flag) + if ver > 0 and len(subflags) > 0: + for xflag in lower_flags: + xver, _, xsubflags = ver_flags(xflag) + if ver == xver: + subflags = xsubflags + subflags + cur_flag = arch + '+' + '+'.join(subflags) + + flags = filtered + [cur_flag] + if i > 0: + flags += upper_flags + break + + # to remove overridable flags + final_flags = [] + matched = set() + for f in reversed(flags): + match = re.match(self._cc_normalize_unix_krgx, f) + if not match: + pass + elif match[0] in matched: + continue + else: + matched.add(match[0]) + final_flags.insert(0, f) + return final_flags + + _cc_normalize_win_frgx = re.compile( + r"^(?!(/arch\:|/Qx\:))" + ) + _cc_normalize_win_mrgx = re.compile( + r"^(/arch|/Qx:)" + ) + def _cc_normalize_win(self, flags): + for i, f in enumerate(reversed(flags)): + if not 
re.match(self._cc_normalize_win_mrgx, f):
+                continue
+            i += 1
+            return list(filter(
+                self._cc_normalize_win_frgx.search, flags[:-i]
+            )) + flags[-i:]
+        return flags
+
+class _Feature:
+    """A helper class for `CCompilerOpt` that manages CPU features.
+
+    Attributes
+    ----------
+    feature_supported : dict
+        Dictionary containing all CPU features supported
+        by the platform, according to the specified values in attribute
+        `_Config.conf_features` and `_Config.conf_features_partial()`
+
+    feature_min : set
+        The minimum support of CPU features, according to
+        the specified values in attribute `_Config.conf_min_features`.
+    """
+    def __init__(self):
+        if hasattr(self, "feature_is_cached"):
+            return
+        self.feature_supported = pfeatures = self.conf_features_partial()
+        for feature_name in list(pfeatures.keys()):
+            feature  = pfeatures[feature_name]
+            cfeature = self.conf_features[feature_name]
+            feature.update({
+                k:v for k,v in cfeature.items() if k not in feature
+            })
+            disabled = feature.get("disable")
+            if disabled is not None:
+                pfeatures.pop(feature_name)
+                self.dist_log(
+                    "feature '%s' is disabled," % feature_name,
+                    disabled, stderr=True
+                )
+                continue
+            # list is used internally for these options
+            for option in (
+                "implies", "group", "detect", "headers", "flags", "extra_checks"
+            ):
+                oval = feature.get(option)
+                if isinstance(oval, str):
+                    feature[option] = oval.split()
+
+        self.feature_min = set()
+        min_f = self.conf_min_features.get(self.cc_march, "")
+        for F in min_f.upper().split():
+            if F in self.feature_supported:
+                self.feature_min.add(F)
+
+        self.feature_is_cached = True
+
+    def feature_names(self, names=None, force_flags=None, macros=[]):
+        """
+        Return a set of CPU feature names supported by the platform and the **C** compiler.
+
+        Parameters
+        ----------
+        names : sequence or None, optional
+            Specify certain CPU features to test against the **C** compiler.
+            If None (default), all currently supported features are tested.
+            **Note**: feature names must be in upper-case.
+
+        force_flags : list or None, optional
+            If None (default), the default compiler flags of every CPU feature
+            will be used during the test.
+
+        macros : list of tuples, optional
+            A list of C macro definitions.
+        """
+        assert(
+            names is None or (
+                not isinstance(names, str) and
+                hasattr(names, "__iter__")
+            )
+        )
+        assert(force_flags is None or isinstance(force_flags, list))
+        if names is None:
+            names = self.feature_supported.keys()
+        supported_names = set()
+        for f in names:
+            if self.feature_is_supported(
+                f, force_flags=force_flags, macros=macros
+            ):
+                supported_names.add(f)
+        return supported_names
+
+    def feature_is_exist(self, name):
+        """
+        Returns True if a certain feature exists and is covered within
+        ``_Config.conf_features``.
+
+        Parameters
+        ----------
+        'name': str
+            feature name in uppercase.
+        """
+        assert(name.isupper())
+        return name in self.conf_features
+
+    def feature_sorted(self, names, reverse=False):
+        """
+        Sort a list of CPU features, ordered from the lowest interest up.
+
+        Parameters
+        ----------
+        'names': sequence
+            sequence of supported feature names in uppercase.
+        'reverse': bool, optional
+            If True, the sorted features are reversed
(highest interest first).
+
+        Returns
+        -------
+        list, sorted CPU features
+        """
+        def sort_cb(k):
+            if isinstance(k, str):
+                return self.feature_supported[k]["interest"]
+            # multiple features
+            rank = max([self.feature_supported[f]["interest"] for f in k])
+            # FIXME: this isn't a safe way to increase the rank for
+            # multi targets
+            rank += len(k) - 1
+            return rank
+        return sorted(names, reverse=reverse, key=sort_cb)
+
+    def feature_implies(self, names, keep_origins=False):
+        """
+        Return a set of the CPU features implied by 'names'.
+
+        Parameters
+        ----------
+        names : str or sequence of str
+            CPU feature name(s) in uppercase.
+
+        keep_origins : bool
+            if False (default) the returned set will not contain any
+            features from 'names'. Origins can only reappear when two
+            features imply each other.
+
+        Examples
+        --------
+        >>> self.feature_implies("SSE3")
+        {'SSE', 'SSE2'}
+        >>> self.feature_implies("SSE2")
+        {'SSE'}
+        >>> self.feature_implies("SSE2", keep_origins=True)
+        # 'SSE2' found here since 'SSE' and 'SSE2' imply each other
+        {'SSE', 'SSE2'}
+        """
+        def get_implies(name, _caller=None):
+            # a fresh set per top-level call avoids the shared
+            # mutable-default pitfall
+            _caller = set() if _caller is None else _caller
+            implies = set()
+            d = self.feature_supported[name]
+            for i in d.get("implies", []):
+                implies.add(i)
+                if i in _caller:
+                    # infinite recursion guard, since
+                    # features can imply each other
+                    continue
+                _caller.add(name)
+                implies = implies.union(get_implies(i, _caller))
+            return implies
+
+        if isinstance(names, str):
+            implies = get_implies(names)
+            names = [names]
+        else:
+            assert(hasattr(names, "__iter__"))
+            implies = set()
+            for n in names:
+                implies = implies.union(get_implies(n))
+        if not keep_origins:
+            implies.difference_update(names)
+        return implies
+
+    def feature_implies_c(self, names):
+        """same as feature_implies() but also combines 'names' into the result"""
+        if isinstance(names, str):
+            names = set((names,))
+        else:
+            names = set(names)
+        return names.union(self.feature_implies(names))
+
+    def feature_ahead(self, names):
+        """
+        Return the features from 'names' that remain after removing any
+        implied features, keeping the origins.
+
+        Parameters
+        ----------
+        'names': sequence
+            sequence of CPU feature names in uppercase.
+
+        Returns
+        -------
+        list of CPU features, in the same order as 'names'
+
+        Examples
+        --------
+        >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
+        ["SSE41"]
+        # assume AVX2 and FMA3 imply each other and AVX2
+        # is the highest interest
+        >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+        ["AVX2"]
+        # assume AVX2 and FMA3 don't imply each other
+        >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+        ["AVX2", "FMA3"]
+        """
+        assert(
+            not isinstance(names, str)
+            and hasattr(names, '__iter__')
+        )
+        implies = self.feature_implies(names, keep_origins=True)
+        ahead = [n for n in names if n not in implies]
+        if len(ahead) == 0:
+            # if all features imply each other,
+            # return the highest-interest feature
+            ahead = self.feature_sorted(names, reverse=True)[:1]
+        return ahead
+
+    def feature_untied(self, names):
+        """
+        Same as 'feature_ahead()' except that when two features imply
+        each other, only the one with the highest interest is kept.
+
+        Parameters
+        ----------
+        'names': sequence
+            sequence of CPU feature names in uppercase.
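`feature_implies()` above computes a transitive closure over the per-feature "implies" lists, guarding against features that imply each other. The same walk against a toy table (the table values are illustrative, not numpy's real configuration):

FEATURES = {
    "SSE":  {"implies": ["SSE2"]},  # SSE and SSE2 imply each other here
    "SSE2": {"implies": ["SSE"]},
    "SSE3": {"implies": ["SSE2"]},
}

def implies(name, _seen=None):
    _seen = set() if _seen is None else _seen
    out = set()
    for i in FEATURES[name]["implies"]:
        out.add(i)
        if i in _seen:
            continue  # recursion guard for mutual implication
        _seen.add(name)
        out |= implies(i, _seen)
    return out

print(sorted(implies("SSE3")))  # ['SSE', 'SSE2']
print(sorted(implies("SSE2")))  # ['SSE', 'SSE2'], the origin reappears;
                                # feature_implies() strips it unless keep_origins=True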
+
+        Returns
+        -------
+        list of CPU features, in the same order as 'names'
+
+        Examples
+        --------
+        >>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
+        ["SSE2", "SSE3", "SSE41"]
+        # assume AVX2 and FMA3 imply each other
+        >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
+        ["SSE2", "SSE3", "SSE41", "AVX2"]
+        """
+        assert(
+            not isinstance(names, str)
+            and hasattr(names, '__iter__')
+        )
+        final = []
+        for n in names:
+            implies = self.feature_implies(n)
+            tied = [
+                nn for nn in final
+                if nn in implies and n in self.feature_implies(nn)
+            ]
+            if tied:
+                tied = self.feature_sorted(tied + [n])
+                if n not in tied[1:]:
+                    continue
+                final.remove(tied[:1][0])
+            final.append(n)
+        return final
+
+    def feature_get_til(self, names, keyisfalse):
+        """
+        same as `feature_implies_c()` but stops collecting implied
+        features once the feature option named by parameter
+        'keyisfalse' is False; the returned features are also sorted.
+        """
+        def til(tnames):
+            # sort from highest to lowest interest then cut if "key" is False
+            tnames = self.feature_implies_c(tnames)
+            tnames = self.feature_sorted(tnames, reverse=True)
+            for i, n in enumerate(tnames):
+                if not self.feature_supported[n].get(keyisfalse, True):
+                    tnames = tnames[:i+1]
+                    break
+            return tnames
+
+        if isinstance(names, str) or len(names) <= 1:
+            names = til(names)
+            # normalize the sort
+            names.reverse()
+            return names
+
+        names = self.feature_ahead(names)
+        names = {t for n in names for t in til(n)}
+        return self.feature_sorted(names)
+
+    def feature_detect(self, names):
+        """
+        Return a list of the CPU features that are required to be detected,
+        sorted from the lowest to the highest interest.
+        """
+        names = self.feature_get_til(names, "implies_detect")
+        detect = []
+        for n in names:
+            d = self.feature_supported[n]
+            detect += d.get("detect", d.get("group", [n]))
+        return detect
+
+    @_Cache.me
+    def feature_flags(self, names):
+        """
+        Return a list of CPU feature flags sorted from the lowest
+        to the highest interest.
+        """
+        names = self.feature_sorted(self.feature_implies_c(names))
+        flags = []
+        for n in names:
+            d = self.feature_supported[n]
+            f = d.get("flags", [])
+            if not f or not self.cc_test_flags(f):
+                continue
+            flags += f
+        return self.cc_normalize_flags(flags)
+
+    @_Cache.me
+    def feature_test(self, name, force_flags=None, macros=[]):
+        """
+        Test a certain CPU feature against the compiler through its own
+        check file.
+
+        Parameters
+        ----------
+        name : str
+            Supported CPU feature name.
+
+        force_flags : list or None, optional
+            If None (default), the flags returned by `feature_flags()`
+            will be used.
+
+        macros : list of tuples, optional
+            A list of C macro definitions.
+        """
+        if force_flags is None:
+            force_flags = self.feature_flags(name)
+
+        self.dist_log(
+            "testing feature '%s' with flags (%s)" % (
+            name, ' '.join(force_flags)
+        ))
+        # Each CPU feature must have a C source file containing at
+        # least one intrinsic or instruction related to this feature.
+        test_path = os.path.join(
+            self.conf_check_path, "cpu_%s.c" % name.lower()
+        )
+        if not os.path.exists(test_path):
+            self.dist_fatal("feature test file does not exist", test_path)
+
+        test = self.dist_test(
+            test_path, force_flags + self.cc_flags["werror"], macros=macros
+        )
+        if not test:
+            self.dist_log("testing failed", stderr=True)
+        return test
+
+    @_Cache.me
+    def feature_is_supported(self, name, force_flags=None, macros=[]):
+        """
+        Check if a certain CPU feature is supported by the platform and compiler.
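`feature_detect()` above resolves each requested feature into the names the runtime dispatcher must test: an explicit "detect" list wins, otherwise the feature's "group", otherwise the feature itself. The same lookup in isolation, against an illustrative table (the group members are examples, not numpy's exact configuration):

SUPPORTED = {
    "ASIMD":      {},  # no "detect"/"group": detected by its own name
    "AVX512_SKX": {"group": ["AVX512VL", "AVX512BW", "AVX512DQ"]},
}

def feature_detect(names):
    detect = []
    for n in names:
        d = SUPPORTED[n]
        detect += d.get("detect", d.get("group", [n]))
    return detect

print(feature_detect(["ASIMD", "AVX512_SKX"]))
# ['ASIMD', 'AVX512VL', 'AVX512BW', 'AVX512DQ']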
+
+        Parameters
+        ----------
+        name : str
+            CPU feature name in uppercase.
+
+        force_flags : list or None, optional
+            If None (default), the default compiler flags of the CPU
+            feature will be used during the test.
+
+        macros : list of tuples, optional
+            A list of C macro definitions.
+        """
+        assert(name.isupper())
+        assert(force_flags is None or isinstance(force_flags, list))
+
+        supported = name in self.feature_supported
+        if supported:
+            for impl in self.feature_implies(name):
+                if not self.feature_test(impl, force_flags, macros=macros):
+                    return False
+            if not self.feature_test(name, force_flags, macros=macros):
+                return False
+        return supported
+
+    @_Cache.me
+    def feature_can_autovec(self, name):
+        """
+        Check whether the feature can be auto-vectorized by the compiler.
+        """
+        assert(isinstance(name, str))
+        d = self.feature_supported[name]
+        can = d.get("autovec", None)
+        if can is None:
+            valid_flags = [
+                self.cc_test_flags([f]) for f in d.get("flags", [])
+            ]
+            can = valid_flags and any(valid_flags)
+        return can
+
+    @_Cache.me
+    def feature_extra_checks(self, name):
+        """
+        Return a list of supported extra checks after testing them against
+        the compiler.
+
+        Parameters
+        ----------
+        name : str
+            CPU feature name in uppercase.
+        """
+        assert isinstance(name, str)
+        d = self.feature_supported[name]
+        extra_checks = d.get("extra_checks", [])
+        if not extra_checks:
+            return []
+
+        self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+        flags = self.feature_flags(name)
+        available = []
+        not_available = []
+        for chk in extra_checks:
+            test_path = os.path.join(
+                self.conf_check_path, "extra_%s.c" % chk.lower()
+            )
+            if not os.path.exists(test_path):
+                self.dist_fatal("extra check file does not exist", test_path)
+
+            is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+            if is_supported:
+                available.append(chk)
+            else:
+                not_available.append(chk)
+
+        if not_available:
+            self.dist_log("testing failed for checks", not_available, stderr=True)
+        return available
+
+
+    def feature_c_preprocessor(self, feature_name, tabs=0):
+        """
+        Generate the C preprocessor definitions and include headers of a CPU feature.
+
+        Parameters
+        ----------
+        'feature_name': str
+            CPU feature name in uppercase.
+        'tabs': int
+            if > 0, indent the generated lines by this number of tabs.
+
+        Returns
+        -------
+        str, generated C preprocessor block
+
+        Examples
+        --------
+        >>> self.feature_c_preprocessor("SSE3")
+        /** SSE3 **/
+        #define NPY_HAVE_SSE3 1
+        #include <pmmintrin.h>
+        """
+        assert(feature_name.isupper())
+        feature = self.feature_supported.get(feature_name)
+        assert(feature is not None)
+
+        prepr = [
+            "/** %s **/" % feature_name,
+            "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
+        ]
+        prepr += [
+            "#include <%s>" % h for h in feature.get("headers", [])
+        ]
+
+        extra_defs = feature.get("group", [])
+        extra_defs += self.feature_extra_checks(feature_name)
+        for edef in extra_defs:
+            # Guard extra definitions in case of a duplicate with
+            # another feature
+            prepr += [
+                "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+                "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
+                "#endif",
+            ]
+
+        if tabs > 0:
+            prepr = [('\t'*tabs) + l for l in prepr]
+        return '\n'.join(prepr)
+
+class _Parse:
+    """A helper class that parses the main arguments of `CCompilerOpt`
+    as well as the configuration statements in dispatch-able sources.
+
+    Parameters
+    ----------
+    cpu_baseline : str or None
+        minimal set of required CPU features or special options.
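`feature_c_preprocessor()` above wraps the group and extra-check definitions in `#ifndef` guards because several features can contribute the same name. A standalone sketch that reproduces the docstring's SSE3 example (the `NPY_` prefix matches that example; the helper name is mine):

def c_preprocessor(prefix, name, headers=(), extra_defs=()):
    prepr = ["/** %s **/" % name,
             "#define %sHAVE_%s 1" % (prefix, name)]
    prepr += ["#include <%s>" % h for h in headers]
    for edef in extra_defs:
        # guard against a duplicate definition from another feature
        prepr += ["#ifndef %sHAVE_%s" % (prefix, edef),
                  "\t#define %sHAVE_%s 1" % (prefix, edef),
                  "#endif"]
    return "\n".join(prepr)

print(c_preprocessor("NPY_", "SSE3", headers=["pmmintrin.h"]))
# /** SSE3 **/
# #define NPY_HAVE_SSE3 1
# #include <pmmintrin.h>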
+ + cpu_dispatch : str or None + dispatched set of additional CPU features or special options. + + Special options can be: + - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` + - **MAX**: Enables all supported CPU features by the Compiler and platform. + - **NATIVE**: Enables all CPU features that supported by the current machine. + - **NONE**: Enables nothing + - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. + NOTE: operand + is only added for nominal reason. + + NOTES: + - Case-insensitive among all CPU features and special options. + - Comma or space can be used as a separator. + - If the CPU feature is not supported by the user platform or compiler, + it will be skipped rather than raising a fatal error. + - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features + - 'cpu_baseline' force enables implied features. + + Attributes + ---------- + parse_baseline_names : list + Final CPU baseline's feature names(sorted from low to high) + parse_baseline_flags : list + Compiler flags of baseline features + parse_dispatch_names : list + Final CPU dispatch-able feature names(sorted from low to high) + parse_target_groups : dict + Dictionary containing initialized target groups that configured + through class attribute `conf_target_groups`. + + The key is represent the group name and value is a tuple + contains three items : + - bool, True if group has the 'baseline' option. + - list, list of CPU features. + - list, list of extra compiler flags. + + """ + def __init__(self, cpu_baseline, cpu_dispatch): + self._parse_policies = dict( + # POLICY NAME, (HAVE, NOT HAVE, [DEB]) + KEEP_BASELINE = ( + None, self._parse_policy_not_keepbase, + [] + ), + KEEP_SORT = ( + self._parse_policy_keepsort, + self._parse_policy_not_keepsort, + [] + ), + MAXOPT = ( + self._parse_policy_maxopt, None, + [] + ), + WERROR = ( + self._parse_policy_werror, None, + [] + ), + AUTOVEC = ( + self._parse_policy_autovec, None, + ["MAXOPT"] + ) + ) + if hasattr(self, "parse_is_cached"): + return + + self.parse_baseline_names = [] + self.parse_baseline_flags = [] + self.parse_dispatch_names = [] + self.parse_target_groups = {} + + if self.cc_noopt: + # skip parsing baseline and dispatch args and keep parsing target groups + cpu_baseline = cpu_dispatch = None + + self.dist_log("check requested baseline") + if cpu_baseline is not None: + cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) + baseline_names = self.feature_names(cpu_baseline) + self.parse_baseline_flags = self.feature_flags(baseline_names) + self.parse_baseline_names = self.feature_sorted( + self.feature_implies_c(baseline_names) + ) + + self.dist_log("check requested dispatch-able features") + if cpu_dispatch is not None: + cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) + cpu_dispatch = { + f for f in cpu_dispatch_ + if f not in self.parse_baseline_names + } + conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) + self.parse_dispatch_names = self.feature_sorted( + self.feature_names(cpu_dispatch) + ) + if len(conflict_baseline) > 0: + self.dist_log( + "skip features", conflict_baseline, "since its part of baseline" + ) + + self.dist_log("initialize targets groups") + for group_name, tokens in self.conf_target_groups.items(): + self.dist_log("parse target group", group_name) + GROUP_NAME = group_name.upper() + if not tokens or not tokens.strip(): + # allow empty groups, useful in case if 
there's a need + # to disable certain group since '_parse_target_tokens()' + # requires at least one valid target + self.parse_target_groups[GROUP_NAME] = ( + False, [], [] + ) + continue + has_baseline, features, extra_flags = \ + self._parse_target_tokens(tokens) + self.parse_target_groups[GROUP_NAME] = ( + has_baseline, features, extra_flags + ) + + self.parse_is_cached = True + + def parse_targets(self, source): + """ + Fetch and parse configuration statements that required for + defining the targeted CPU features, statements should be declared + in the top of source in between **C** comment and start + with a special mark **@targets**. + + Configuration statements are sort of keywords representing + CPU features names, group of statements and policies, combined + together to determine the required optimization. + + Parameters + ---------- + source : str + the path of **C** source file. + + Returns + ------- + - bool, True if group has the 'baseline' option + - list, list of CPU features + - list, list of extra compiler flags + """ + self.dist_log("looking for '@targets' inside -> ", source) + # get lines between /*@targets and */ + with open(source) as fd: + tokens = "" + max_to_reach = 1000 # good enough, isn't? + start_with = "@targets" + start_pos = -1 + end_with = "*/" + end_pos = -1 + for current_line, line in enumerate(fd): + if current_line == max_to_reach: + self.dist_fatal("reached the max of lines") + break + if start_pos == -1: + start_pos = line.find(start_with) + if start_pos == -1: + continue + start_pos += len(start_with) + tokens += line + end_pos = line.find(end_with) + if end_pos != -1: + end_pos += len(tokens) - len(line) + break + + if start_pos == -1: + self.dist_fatal("expected to find '%s' within a C comment" % start_with) + if end_pos == -1: + self.dist_fatal("expected to end with '%s'" % end_with) + + tokens = tokens[start_pos:end_pos] + return self._parse_target_tokens(tokens) + + _parse_regex_arg = re.compile(r'\s|,|([+-])') + def _parse_arg_features(self, arg_name, req_features): + if not isinstance(req_features, str): + self.dist_fatal("expected a string in '%s'" % arg_name) + + final_features = set() + # space and comma can be used as a separator + tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) + append = True # append is the default + for tok in tokens: + if tok[0] in ("#", "$"): + self.dist_fatal( + arg_name, "target groups and policies " + "aren't allowed from arguments, " + "only from dispatch-able sources" + ) + if tok == '+': + append = True + continue + if tok == '-': + append = False + continue + + TOK = tok.upper() # we use upper-case internally + features_to = set() + if TOK == "NONE": + pass + elif TOK == "NATIVE": + native = self.cc_flags["native"] + if not native: + self.dist_fatal(arg_name, + "native option isn't supported by the compiler" + ) + features_to = self.feature_names( + force_flags=native, macros=[("DETECT_FEATURES", 1)] + ) + elif TOK == "MAX": + features_to = self.feature_supported.keys() + elif TOK == "MIN": + features_to = self.feature_min + else: + if TOK in self.feature_supported: + features_to.add(TOK) + else: + if not self.feature_is_exist(TOK): + self.dist_fatal(arg_name, + ", '%s' isn't a known feature or option" % tok + ) + if append: + final_features = final_features.union(features_to) + else: + final_features = final_features.difference(features_to) + + append = True # back to default + + return final_features + + _parse_regex_target = re.compile(r'\s|[*,/]|([()])') + def 
_parse_target_tokens(self, tokens): + assert(isinstance(tokens, str)) + final_targets = [] # to keep it sorted as specified + extra_flags = [] + has_baseline = False + + skipped = set() + policies = set() + multi_target = None + + tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) + if not tokens: + self.dist_fatal("expected one token at least") + + for tok in tokens: + TOK = tok.upper() + ch = tok[0] + if ch in ('+', '-'): + self.dist_fatal( + "+/- are 'not' allowed from target's groups or @targets, " + "only from cpu_baseline and cpu_dispatch parms" + ) + elif ch == '$': + if multi_target is not None: + self.dist_fatal( + "policies aren't allowed inside multi-target '()'" + ", only CPU features" + ) + policies.add(self._parse_token_policy(TOK)) + elif ch == '#': + if multi_target is not None: + self.dist_fatal( + "target groups aren't allowed inside multi-target '()'" + ", only CPU features" + ) + has_baseline, final_targets, extra_flags = \ + self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) + elif ch == '(': + if multi_target is not None: + self.dist_fatal("unclosed multi-target, missing ')'") + multi_target = set() + elif ch == ')': + if multi_target is None: + self.dist_fatal("multi-target opener '(' wasn't found") + targets = self._parse_multi_target(multi_target) + if targets is None: + skipped.add(tuple(multi_target)) + else: + if len(targets) == 1: + targets = targets[0] + if targets and targets not in final_targets: + final_targets.append(targets) + multi_target = None # back to default + else: + if TOK == "BASELINE": + if multi_target is not None: + self.dist_fatal("baseline isn't allowed inside multi-target '()'") + has_baseline = True + continue + + if multi_target is not None: + multi_target.add(TOK) + continue + + if not self.feature_is_exist(TOK): + self.dist_fatal("invalid target name '%s'" % TOK) + + is_enabled = ( + TOK in self.parse_baseline_names or + TOK in self.parse_dispatch_names + ) + if is_enabled: + if TOK not in final_targets: + final_targets.append(TOK) + continue + + skipped.add(TOK) + + if multi_target is not None: + self.dist_fatal("unclosed multi-target, missing ')'") + if skipped: + self.dist_log( + "skip targets", skipped, + "not part of baseline or dispatch-able features" + ) + + final_targets = self.feature_untied(final_targets) + + # add polices dependencies + for p in list(policies): + _, _, deps = self._parse_policies[p] + for d in deps: + if d in policies: + continue + self.dist_log( + "policy '%s' force enables '%s'" % ( + p, d + )) + policies.add(d) + + # release policies filtrations + for p, (have, nhave, _) in self._parse_policies.items(): + func = None + if p in policies: + func = have + self.dist_log("policy '%s' is ON" % p) + else: + func = nhave + if not func: + continue + has_baseline, final_targets, extra_flags = func( + has_baseline, final_targets, extra_flags + ) + + return has_baseline, final_targets, extra_flags + + def _parse_token_policy(self, token): + """validate policy token""" + if len(token) <= 1 or token[-1:] == token[0]: + self.dist_fatal("'$' must stuck in the begin of policy name") + token = token[1:] + if token not in self._parse_policies: + self.dist_fatal( + "'%s' is an invalid policy name, available policies are" % token, + self._parse_policies.keys() + ) + return token + + def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): + """validate group token""" + if len(token) <= 1 or token[-1:] == token[0]: + self.dist_fatal("'#' must stuck in the begin of 
group name") + + token = token[1:] + ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( + token, (False, None, []) + ) + if gtargets is None: + self.dist_fatal( + "'%s' is an invalid target group name, " % token + \ + "available target groups are", + self.parse_target_groups.keys() + ) + if ghas_baseline: + has_baseline = True + # always keep sorting as specified + final_targets += [f for f in gtargets if f not in final_targets] + extra_flags += [f for f in gextra_flags if f not in extra_flags] + return has_baseline, final_targets, extra_flags + + def _parse_multi_target(self, targets): + """validate multi targets that defined between parentheses()""" + # remove any implied features and keep the origins + if not targets: + self.dist_fatal("empty multi-target '()'") + if not all([ + self.feature_is_exist(tar) for tar in targets + ]) : + self.dist_fatal("invalid target name in multi-target", targets) + if not all([ + ( + tar in self.parse_baseline_names or + tar in self.parse_dispatch_names + ) + for tar in targets + ]) : + return None + targets = self.feature_ahead(targets) + if not targets: + return None + # force sort multi targets, so it can be comparable + targets = self.feature_sorted(targets) + targets = tuple(targets) # hashable + return targets + + def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): + """skip all baseline features""" + skipped = [] + for tar in final_targets[:]: + is_base = False + if isinstance(tar, str): + is_base = tar in self.parse_baseline_names + else: + # multi targets + is_base = all([ + f in self.parse_baseline_names + for f in tar + ]) + if is_base: + skipped.append(tar) + final_targets.remove(tar) + + if skipped: + self.dist_log("skip baseline features", skipped) + + return has_baseline, final_targets, extra_flags + + def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): + """leave a notice that $keep_sort is on""" + self.dist_log( + "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" + "are 'not' sorted depend on the highest interest but" + "as specified in the dispatch-able source or the extra group" + ) + return has_baseline, final_targets, extra_flags + + def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): + """sorted depend on the highest interest""" + final_targets = self.feature_sorted(final_targets, reverse=True) + return has_baseline, final_targets, extra_flags + + def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): + """append the compiler optimization flags""" + if self.cc_has_debug: + self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") + elif self.cc_noopt: + self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") + else: + flags = self.cc_flags["opt"] + if not flags: + self.dist_log( + "current compiler doesn't support optimization flags, " + "policy 'maxopt' is skipped", stderr=True + ) + else: + extra_flags += flags + return has_baseline, final_targets, extra_flags + + def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): + """force warnings to treated as errors""" + flags = self.cc_flags["werror"] + if not flags: + self.dist_log( + "current compiler doesn't support werror flags, " + "warnings will 'not' treated as errors", stderr=True + ) + else: + self.dist_log("compiler warnings are treated as errors") + extra_flags += flags + return has_baseline, final_targets, extra_flags + + def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): 
+ """skip features that has no auto-vectorized support by compiler""" + skipped = [] + for tar in final_targets[:]: + if isinstance(tar, str): + can = self.feature_can_autovec(tar) + else: # multiple target + can = all([ + self.feature_can_autovec(t) + for t in tar + ]) + if not can: + final_targets.remove(tar) + skipped.append(tar) + + if skipped: + self.dist_log("skip non auto-vectorized features", skipped) + + return has_baseline, final_targets, extra_flags + +class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): + """ + A helper class for `CCompiler` aims to provide extra build options + to effectively control of compiler optimizations that are directly + related to CPU features. + """ + def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): + _Config.__init__(self) + _Distutils.__init__(self, ccompiler) + _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) + _CCompiler.__init__(self) + _Feature.__init__(self) + if not self.cc_noopt and self.cc_has_native: + self.dist_log( + "native flag is specified through environment variables. " + "force cpu-baseline='native'" + ) + cpu_baseline = "native" + _Parse.__init__(self, cpu_baseline, cpu_dispatch) + # keep the requested features untouched, need it later for report + # and trace purposes + self._requested_baseline = cpu_baseline + self._requested_dispatch = cpu_dispatch + # key is the dispatch-able source and value is a tuple + # contains two items (has_baseline[boolean], dispatched-features[list]) + self.sources_status = getattr(self, "sources_status", {}) + # every instance should has a separate one + self.cache_private.add("sources_status") + # set it at the end to make sure the cache writing was done after init + # this class + self.hit_cache = hasattr(self, "hit_cache") + + def is_cached(self): + """ + Returns True if the class loaded from the cache file + """ + return self.cache_infile and self.hit_cache + + def cpu_baseline_flags(self): + """ + Returns a list of final CPU baseline compiler flags + """ + return self.parse_baseline_flags + + def cpu_baseline_names(self): + """ + return a list of final CPU baseline feature names + """ + return self.parse_baseline_names + + def cpu_dispatch_names(self): + """ + return a list of final CPU dispatch feature names + """ + return self.parse_dispatch_names + + def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): + """ + Compile one or more dispatch-able sources and generates object files, + also generates abstract C config headers and macros that + used later for the final runtime dispatching process. + + The mechanism behind it is to takes each source file that specified + in 'sources' and branching it into several files depend on + special configuration statements that must be declared in the + top of each source which contains targeted CPU features, + then it compiles every branched source with the proper compiler flags. + + Parameters + ---------- + sources : list + Must be a list of dispatch-able sources file paths, + and configuration statements must be declared inside + each file. + + src_dir : str + Path of parent directory for the generated headers and wrapped sources. + If None(default) the files will generated in-place. + + ccompiler : CCompiler + Distutils `CCompiler` instance to be used for compilation. + If None (default), the provided instance during the initialization + will be used instead. 
+ + **kwargs : any + Arguments to pass on to the `CCompiler.compile()` + + Returns + ------- + list : generated object files + + Raises + ------ + CompileError + Raises by `CCompiler.compile()` on compiling failure. + DistutilsError + Some errors during checking the sanity of configuration statements. + + See Also + -------- + parse_targets : + Parsing the configuration statements of dispatch-able sources. + """ + to_compile = {} + baseline_flags = self.cpu_baseline_flags() + include_dirs = kwargs.setdefault("include_dirs", []) + + for src in sources: + output_dir = os.path.dirname(src) + if src_dir: + if not output_dir.startswith(src_dir): + output_dir = os.path.join(src_dir, output_dir) + if output_dir not in include_dirs: + # To allow including the generated config header(*.dispatch.h) + # by the dispatch-able sources + include_dirs.append(output_dir) + + has_baseline, targets, extra_flags = self.parse_targets(src) + nochange = self._generate_config(output_dir, src, targets, has_baseline) + for tar in targets: + tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) + flags = tuple(extra_flags + self.feature_flags(tar)) + to_compile.setdefault(flags, []).append(tar_src) + + if has_baseline: + flags = tuple(extra_flags + baseline_flags) + to_compile.setdefault(flags, []).append(src) + + self.sources_status[src] = (has_baseline, targets) + + # For these reasons, the sources are compiled in a separate loop: + # - Gathering all sources with the same flags to benefit from + # the parallel compiling as much as possible. + # - To generate all config headers of the dispatchable sources, + # before the compilation in case if there are dependency relationships + # among them. + objects = [] + for flags, srcs in to_compile.items(): + objects += self.dist_compile( + srcs, list(flags), ccompiler=ccompiler, **kwargs + ) + return objects + + def generate_dispatch_header(self, header_path): + """ + Generate the dispatch header which contains the #definitions and headers + for platform-specific instruction-sets for the enabled CPU baseline and + dispatch-able features. + + Its highly recommended to take a look at the generated header + also the generated source files via `try_dispatch()` + in order to get the full picture. + """ + self.dist_log("generate CPU dispatch header: (%s)" % header_path) + + baseline_names = self.cpu_baseline_names() + dispatch_names = self.cpu_dispatch_names() + baseline_len = len(baseline_names) + dispatch_len = len(dispatch_names) + + header_dir = os.path.dirname(header_path) + if not os.path.exists(header_dir): + self.dist_log( + f"dispatch header dir {header_dir} does not exist, creating it", + stderr=True + ) + os.makedirs(header_dir) + + with open(header_path, 'w') as f: + baseline_calls = ' \\\n'.join([ + ( + "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" + ) % (self.conf_c_prefix, f) + for f in baseline_names + ]) + dispatch_calls = ' \\\n'.join([ + ( + "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" + ) % (self.conf_c_prefix, f) + for f in dispatch_names + ]) + f.write(textwrap.dedent("""\ + /* + * AUTOGENERATED DON'T EDIT + * Please make changes to the code generator (distutils/ccompiler_opt.py) + */ + #define {pfx}WITH_CPU_BASELINE "{baseline_str}" + #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" + #define {pfx}WITH_CPU_BASELINE_N {baseline_len} + #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} + #define {pfx}WITH_CPU_EXPAND_(X) X + #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) 
\\ + {baseline_calls} + #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\ + {dispatch_calls} + """).format( + pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), + dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, + dispatch_len=dispatch_len, baseline_calls=baseline_calls, + dispatch_calls=dispatch_calls + )) + baseline_pre = '' + for name in baseline_names: + baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' + + dispatch_pre = '' + for name in dispatch_names: + dispatch_pre += textwrap.dedent("""\ + #ifdef {pfx}CPU_TARGET_{name} + {pre} + #endif /*{pfx}CPU_TARGET_{name}*/ + """).format( + pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( + name, tabs=1 + )) + + f.write(textwrap.dedent("""\ + /******* baseline features *******/ + {baseline_pre} + /******* dispatch features *******/ + {dispatch_pre} + """).format( + pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, + dispatch_pre=dispatch_pre + )) + + def report(self, full=False): + report = [] + platform_rows = [] + baseline_rows = [] + dispatch_rows = [] + report.append(("Platform", platform_rows)) + report.append(("", "")) + report.append(("CPU baseline", baseline_rows)) + report.append(("", "")) + report.append(("CPU dispatch", dispatch_rows)) + + ########## platform ########## + platform_rows.append(("Architecture", ( + "unsupported" if self.cc_on_noarch else self.cc_march) + )) + platform_rows.append(("Compiler", ( + "unix-like" if self.cc_is_nocc else self.cc_name) + )) + ########## baseline ########## + if self.cc_noopt: + baseline_rows.append(("Requested", "optimization disabled")) + else: + baseline_rows.append(("Requested", repr(self._requested_baseline))) + + baseline_names = self.cpu_baseline_names() + baseline_rows.append(( + "Enabled", (' '.join(baseline_names) if baseline_names else "none") + )) + baseline_flags = self.cpu_baseline_flags() + baseline_rows.append(( + "Flags", (' '.join(baseline_flags) if baseline_flags else "none") + )) + extra_checks = [] + for name in baseline_names: + extra_checks += self.feature_extra_checks(name) + baseline_rows.append(( + "Extra checks", (' '.join(extra_checks) if extra_checks else "none") + )) + + ########## dispatch ########## + if self.cc_noopt: + baseline_rows.append(("Requested", "optimization disabled")) + else: + dispatch_rows.append(("Requested", repr(self._requested_dispatch))) + + dispatch_names = self.cpu_dispatch_names() + dispatch_rows.append(( + "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") + )) + ########## Generated ########## + # TODO: + # - collect object names from 'try_dispatch()' + # then get size of each object and printed + # - give more details about the features that not + # generated due compiler support + # - find a better output's design. 
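For reference, the `_wrap_target()` helper further below derives one wrapped source per target by injecting the lower-cased target name before the file extension; multi-targets join their names with '.'. The naming rule in isolation:

import os

def wrap_name(dispatch_src, ext_name):
    # same format string as _wrap_target() below
    root, ext = os.path.splitext(os.path.basename(dispatch_src))
    return "{0}.{2}{1}".format(root, ext, ext_name.lower())

print(wrap_name("src/umath.dispatch.c", "AVX2"))
# umath.dispatch.avx2.c
print(wrap_name("src/umath.dispatch.c", "AVX512F.FMA3"))
# umath.dispatch.avx512f.fma3.c (multi-target)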
+ # + target_sources = {} + for source, (_, targets) in self.sources_status.items(): + for tar in targets: + target_sources.setdefault(tar, []).append(source) + + if not full or not target_sources: + generated = "" + for tar in self.feature_sorted(target_sources): + sources = target_sources[tar] + name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) + generated += name + "[%d] " % len(sources) + dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) + else: + dispatch_rows.append(("Generated", '')) + for tar in self.feature_sorted(target_sources): + sources = target_sources[tar] + pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) + flags = ' '.join(self.feature_flags(tar)) + implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) + detect = ' '.join(self.feature_detect(tar)) + extra_checks = [] + for name in ((tar,) if isinstance(tar, str) else tar): + extra_checks += self.feature_extra_checks(name) + extra_checks = (' '.join(extra_checks) if extra_checks else "none") + + dispatch_rows.append(('', '')) + dispatch_rows.append((pretty_name, implies)) + dispatch_rows.append(("Flags", flags)) + dispatch_rows.append(("Extra checks", extra_checks)) + dispatch_rows.append(("Detect", detect)) + for src in sources: + dispatch_rows.append(("", src)) + + ############################### + # TODO: add support for 'markdown' format + text = [] + secs_len = [len(secs) for secs, _ in report] + cols_len = [len(col) for _, rows in report for col, _ in rows] + tab = ' ' * 2 + pad = max(max(secs_len), max(cols_len)) + for sec, rows in report: + if not sec: + text.append("") # empty line + continue + sec += ' ' * (pad - len(sec)) + text.append(sec + tab + ': ') + for col, val in rows: + col += ' ' * (pad - len(col)) + text.append(tab + col + ': ' + val) + + return '\n'.join(text) + + def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): + assert(isinstance(target, (str, tuple))) + if isinstance(target, str): + ext_name = target_name = target + else: + # multi-target + ext_name = '.'.join(target) + target_name = '__'.join(target) + + wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) + wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) + if nochange and os.path.exists(wrap_path): + return wrap_path + + self.dist_log("wrap dispatch-able target -> ", wrap_path) + # sorting for readability + features = self.feature_sorted(self.feature_implies_c(target)) + target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ + target_defs = [target_join + f for f in features] + target_defs = '\n'.join(target_defs) + + with open(wrap_path, "w") as fd: + fd.write(textwrap.dedent("""\ + /** + * AUTOGENERATED DON'T EDIT + * Please make changes to the code generator \ + (distutils/ccompiler_opt.py) + */ + #define {pfx}CPU_TARGET_MODE + #define {pfx}CPU_TARGET_CURRENT {target_name} + {target_defs} + #include "{path}" + """).format( + pfx=self.conf_c_prefix_, target_name=target_name, + path=os.path.abspath(dispatch_src), target_defs=target_defs + )) + return wrap_path + + def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): + config_path = os.path.basename(dispatch_src) + config_path = os.path.splitext(config_path)[0] + '.h' + config_path = os.path.join(output_dir, config_path) + # check if targets didn't change to avoid recompiling + cache_hash = self.cache_hash(targets, has_baseline) + try: + with open(config_path) as f: + last_hash = f.readline().split("cache_hash:") + if 
len(last_hash) == 2 and int(last_hash[1]) == cache_hash: + return True + except OSError: + pass + + os.makedirs(os.path.dirname(config_path), exist_ok=True) + + self.dist_log("generate dispatched config -> ", config_path) + dispatch_calls = [] + for tar in targets: + if isinstance(tar, str): + target_name = tar + else: # multi target + target_name = '__'.join([t for t in tar]) + req_detect = self.feature_detect(tar) + req_detect = '&&'.join([ + "CHK(%s)" % f for f in req_detect + ]) + dispatch_calls.append( + "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % ( + self.conf_c_prefix_, req_detect, target_name + )) + dispatch_calls = ' \\\n'.join(dispatch_calls) + + if has_baseline: + baseline_calls = ( + "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))" + ) % self.conf_c_prefix_ + else: + baseline_calls = '' + + with open(config_path, "w") as fd: + fd.write(textwrap.dedent("""\ + // cache_hash:{cache_hash} + /** + * AUTOGENERATED DON'T EDIT + * Please make changes to the code generator (distutils/ccompiler_opt.py) + */ + #ifndef {pfx}CPU_DISPATCH_EXPAND_ + #define {pfx}CPU_DISPATCH_EXPAND_(X) X + #endif + #undef {pfx}CPU_DISPATCH_BASELINE_CALL + #undef {pfx}CPU_DISPATCH_CALL + #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\ + {baseline_calls} + #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\ + {dispatch_calls} + """).format( + pfx=self.conf_c_prefix_, baseline_calls=baseline_calls, + dispatch_calls=dispatch_calls, cache_hash=cache_hash + )) + return False + +def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): + """ + Create a new instance of 'CCompilerOpt' and generate the dispatch header + which contains the #definitions and headers of platform-specific instruction-sets for + the enabled CPU baseline and dispatch-able features. + + Parameters + ---------- + compiler : CCompiler instance + dispatch_hpath : str + path of the dispatch header + + **kwargs: passed as-is to `CCompilerOpt(...)` + Returns + ------- + new instance of CCompilerOpt + """ + opt = CCompilerOpt(compiler, **kwargs) + if not os.path.exists(dispatch_hpath) or not opt.is_cached(): + opt.generate_dispatch_header(dispatch_hpath) + return opt diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c new file mode 100644 index 0000000000000000000000000000000000000000..fa7056ba3989b61f11e19887c42862afc1aee83c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c @@ -0,0 +1,27 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]); + /* MAXMIN */ + int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0); + ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0); + /* ROUNDING */ + ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0); +#ifdef __aarch64__ + { + double *src2 = (double*)argv[argc-1]; + float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]); + /* MAXMIN */ + ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0); + ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0); + /* ROUNDING */ + ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0); + } +#endif + return ret; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c new file mode 100644 index 0000000000000000000000000000000000000000..2a7492d94c9ce42cb0eb1ed3c6b32819818baa58 --- /dev/null +++ 
b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c @@ -0,0 +1,16 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + unsigned char *src = (unsigned char*)argv[argc-1]; + uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]); + uint32x4_t va = vdupq_n_u32(3); + int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0); +#ifdef __aarch64__ + ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0); +#endif + return ret; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimdfhm.c new file mode 100644 index 0000000000000000000000000000000000000000..3b4be56ba78be75d3af337c814ab8ae07f832cb6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimdfhm.c @@ -0,0 +1,19 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + float16_t *src = (float16_t*)argv[argc-1]; + float *src2 = (float*)argv[argc-2]; + float16x8_t vhp = vdupq_n_f16(src[0]); + float16x4_t vlhp = vdup_n_f16(src[1]); + float32x4_t vf = vdupq_n_f32(src2[0]); + float32x2_t vlf = vdup_n_f32(src2[1]); + + int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0); + ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0); + + return ret; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c new file mode 100644 index 0000000000000000000000000000000000000000..a78aff92f780582b8e695dec5f74fbbe535afa64 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c @@ -0,0 +1,15 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + float16_t *src = (float16_t*)argv[argc-1]; + float16x8_t vhp = vdupq_n_f16(src[0]); + float16x4_t vlhp = vdup_n_f16(src[1]); + + int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0); + ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0); + return ret; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx.c new file mode 100644 index 0000000000000000000000000000000000000000..544ce2a6f1b5655eef3ae5771c1497c3634fc898 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #ifndef __AVX__ + #error "HOST/ARCH doesn't support AVX" + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1])); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c new file mode 100644 index 0000000000000000000000000000000000000000..aabe6ee789bee3fca8ff4f17b5b274e47af6f856 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX2__ + #error "HOST/ARCH doesn't support AVX2" + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_clx.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_clx.c new file mode 100644 index 0000000000000000000000000000000000000000..eeeb07de6b5e1c1e664c0d3a3eaeb9052b344892 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_clx.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512VNNI__ + #error "HOST/ARCH doesn't support CascadeLake AVX512 features" + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + /* VNNI */ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c new file mode 100644 index 0000000000000000000000000000000000000000..c15c34dfdca21aa703b256ad709777c73344bf89 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c @@ -0,0 +1,24 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__) + #error "HOST/ARCH doesn't support CannonLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + /* IFMA */ + a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512()); + /* VBMI */ + a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_icl.c new file mode 100644 index 0000000000000000000000000000000000000000..816a68cac0faa45da7f26ef14cb46830e53eeb13 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_icl.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support IceLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + /* VBMI2 */ + a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512()); + /* BITALG */ + a = _mm512_popcnt_epi8(a); + /* VPOPCNTDQ */ + a = _mm512_popcnt_epi64(a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knl.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knl.c new file mode 100644 index 0000000000000000000000000000000000000000..64953a718ba71d0812225fc27479b32b3eec0287 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knl.c @@ -0,0 +1,25 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #if !defined(__AVX512ER__) || !defined(__AVX512PF__) + #error "HOST/ARCH doesn't support Knights Landing AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + int base[128]={}; + __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]); + /* ER */ + __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad)); + /* PF */ + _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1); + return base[0]; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knm.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knm.c new file mode 100644 index 0000000000000000000000000000000000000000..edfa06cbd2977c144bbcc1941ecb4ab447c93ead --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knm.c @@ -0,0 +1,30 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support Knights Mill AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]); + + /* 4FMAPS */ + b = _mm512_4fmadd_ps(b, b, b, b, b, NULL); + /* 4VNNIW */ + a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL); + /* VPOPCNTDQ */ + a = _mm512_popcnt_epi64(a); + + a = _mm512_add_epi32(a, _mm512_castps_si512(b)); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +}
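Each avx512_* probe guards a group of extensions rather than a single one, so compilation only succeeds when every macro in the bundle is defined. The bundles, read directly off the #error guards in these files (group names follow the file names):

    # AVX512 groups exercised by the cpu_avx512_*.c probes
    AVX512_GROUPS = {
        "KNL": ("__AVX512ER__", "__AVX512PF__"),
        "KNM": ("__AVX5124FMAPS__", "__AVX5124VNNIW__", "__AVX512VPOPCNTDQ__"),
        "SKX": ("__AVX512VL__", "__AVX512BW__", "__AVX512DQ__"),
        "CLX": ("__AVX512VNNI__",),
        "CNL": ("__AVX512IFMA__", "__AVX512VBMI__"),
        "ICL": ("__AVX512VBMI2__", "__AVX512BITALG__", "__AVX512VPOPCNTDQ__"),
        "SPR": ("__AVX512FP16__",),
    }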
diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_skx.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_skx.c new file mode 100644 index 0000000000000000000000000000000000000000..fd007cc7e81684dfdac69b4475fc9150d247e147 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_skx.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__) + #error "HOST/ARCH doesn't support SkyLake AVX512 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); + /* VL */ + __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1)); + /* DQ */ + __m512i b = _mm512_broadcast_i32x8(a); + /* BW */ + b = _mm512_abs_epi16(b); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(b)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_spr.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_spr.c new file mode 100644 index 0000000000000000000000000000000000000000..13735a1b00e5ad2a6f7797a5aabce525c6eef7a7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512_spr.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512FP16__) + #error "HOST/ARCH doesn't support Sapphire Rapids AVX512FP16 features" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ +/* clang has a bug regarding our spr code, see gh-23730. */ +#if __clang__ +#error +#endif + __m512h a = _mm512_loadu_ph((void*)argv[argc-1]); + __m512h temp = _mm512_fmadd_ph(a, a, a); + _mm512_storeu_ph((void*)(argv[argc-1]), temp); + return 0; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512cd.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512cd.c new file mode 100644 index 0000000000000000000000000000000000000000..09e546007c33442e39efd1a1e27d18e87259ec95 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512cd.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512CD__ + #error "HOST/ARCH doesn't support AVX512CD" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c new file mode 100644 index 0000000000000000000000000000000000000000..cbe502d3b25e5159e083a148313168eeb2a72a80 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified.
+ * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX512F__ + #error "HOST/ARCH doesn't support AVX512F" + #endif +#endif + +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c new file mode 100644 index 0000000000000000000000000000000000000000..b359595a9a0ff22ebdde100018286f7c947a83ab --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __F16C__ + #error "HOST/ARCH doesn't support F16C" + #endif +#endif + +#include <emmintrin.h> +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1])); + __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2])); + return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8))); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c new file mode 100644 index 0000000000000000000000000000000000000000..03bf0f470eed36bbc581bab269feb3addc48bd3e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #if !defined(__FMA__) && !defined(__AVX2__) + #error "HOST/ARCH doesn't support FMA3" + #endif +#endif + +#include <xmmintrin.h> +#include <immintrin.h> + +int main(int argc, char **argv) +{ + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_fmadd_ps(a, a, a); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c new file mode 100644 index 0000000000000000000000000000000000000000..76e03bb916b76fa9b4367421c009632636dbb7f4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c @@ -0,0 +1,13 @@ +#include <immintrin.h> +#ifdef _MSC_VER + #include <ammintrin.h> +#else + #include <x86intrin.h> +#endif + +int main(int argc, char **argv) +{ + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_macc_ps(a, a, a); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon.c new file mode 100644 index 0000000000000000000000000000000000000000..9ab8084b38c04db202e1d47e3cfab026a24c62d4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon.c @@ -0,0 +1,19 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + // passing from untraced pointers to avoid optimizing out any constants + // so we can test against the linker. + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]); + int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0); +#ifdef __aarch64__ + double *src2 = (double*)argv[argc-2]; + float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]); + ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0); +#endif + return ret; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon_fp16.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon_fp16.c new file mode 100644 index 0000000000000000000000000000000000000000..58c9287a2ab74406d19b5a13742c07a186b88a61 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon_fp16.c @@ -0,0 +1,11 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + short *src = (short*)argv[argc-1]; + float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src)); + return (int)vgetq_lane_f32(v_z4, 0); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c new file mode 100644 index 0000000000000000000000000000000000000000..35cc7df26758f0236f2c0345f449795883fc0855 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c @@ -0,0 +1,21 @@ +#ifdef _MSC_VER + #include <Intrin.h> +#endif +#include <arm_neon.h> + +int main(int argc, char **argv) +{ + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]); + float32x4_t v2 = vdupq_n_f32(src[1]); + float32x4_t v3 = vdupq_n_f32(src[2]); + int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0); +#ifdef __aarch64__ + double *src2 = (double*)argv[argc-2]; + float64x2_t vd1 = vdupq_n_f64(src2[0]); + float64x2_t vd2 = vdupq_n_f64(src2[1]); + float64x2_t vd3 = vdupq_n_f64(src2[2]); + ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0); +#endif + return ret; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c new file mode 100644 index
0000000000000000000000000000000000000000..3e54fe8068fcc69de2f764073e5951142105c20a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c @@ -0,0 +1,32 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__SSE4_2__) && !defined(__POPCNT__) + #error "HOST/ARCH doesn't support POPCNT" + #endif +#endif + +#ifdef _MSC_VER + #include <nmmintrin.h> +#else + #include <popcntintrin.h> +#endif + +int main(int argc, char **argv) +{ + // To make sure popcnt instructions are generated + // and tested against the assembler + unsigned long long a = *((unsigned long long*)argv[argc-1]); + unsigned int b = *((unsigned int*)argv[argc-2]); + +#if defined(_M_X64) || defined(__x86_64__) + a = _mm_popcnt_u64(a); +#endif + b = _mm_popcnt_u32(b); + return (int)a + b; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse.c new file mode 100644 index 0000000000000000000000000000000000000000..063ccf738c0f62159cfd9262447a869492d9ac97 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE__ + #error "HOST/ARCH doesn't support SSE" + #endif +#endif + +#include <xmmintrin.h> + +int main(void) +{ + __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c new file mode 100644 index 0000000000000000000000000000000000000000..d88c89122b3b94129f35f04fb83191c052e85d59 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features.
+ */ + #ifndef __SSE2__ + #error "HOST/ARCH doesn't support SSE2" + #endif +#endif + +#include <emmintrin.h> + +int main(void) +{ + __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128()); + return _mm_cvtsi128_si32(a); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c new file mode 100644 index 0000000000000000000000000000000000000000..54c5bbaa111a58c6cffc181648e35e5f86ebdb9f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE3__ + #error "HOST/ARCH doesn't support SSE3" + #endif +#endif + +#include <pmmintrin.h> + +int main(void) +{ + __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c new file mode 100644 index 0000000000000000000000000000000000000000..a0be9920451b8c3fa0fee24dcf6405332e028d07 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE4_1__ + #error "HOST/ARCH doesn't support SSE41" + #endif +#endif + +#include <smmintrin.h> + +int main(void) +{ + __m128 a = _mm_floor_ps(_mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +}
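A single probe per level suffices because x86 SIMD generations imply their predecessors, so requesting one feature pulls in everything below it. A sketch of that expansion; the implication edges are paraphrased from numpy.distutils.ccompiler_opt and should be treated as illustrative:

    IMPLIES = {
        "SSE2": ("SSE",), "SSE3": ("SSE2",), "SSSE3": ("SSE3",),
        "SSE41": ("SSSE3",), "POPCNT": ("SSE41",), "SSE42": ("POPCNT",),
        "AVX": ("SSE42",), "F16C": ("AVX",), "FMA3": ("F16C",), "AVX2": ("F16C",),
    }

    def expand(feature):
        """Return `feature` plus everything it transitively implies."""
        out, todo = set(), [feature]
        while todo:
            f = todo.pop()
            if f not in out:
                out.add(f)
                todo.extend(IMPLIES.get(f, ()))
        return out

    # expand("SSE42") -> {'SSE42', 'POPCNT', 'SSE41', 'SSSE3', 'SSE3', 'SSE2', 'SSE'}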
diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c new file mode 100644 index 0000000000000000000000000000000000000000..d3da06ab3dbeda8ac3232b65f000598aeaa9fbb8 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSE4_2__ + #error "HOST/ARCH doesn't support SSE42" + #endif +#endif + +#include <nmmintrin.h> + +int main(void) +{ + __m128i a = _mm_cmpgt_epi64(_mm_setzero_si128(), _mm_setzero_si128()); + return _mm_cvtsi128_si32(a); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c new file mode 100644 index 0000000000000000000000000000000000000000..ad91d6a91050fea9c32310db5eba9c1259a40aea --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __SSSE3__ + #error "HOST/ARCH doesn't support SSSE3" + #endif +#endif + +#include <tmmintrin.h> + +int main(void) +{ + __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128()); + return (int)_mm_cvtsi128_si32(a); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sve.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sve.c new file mode 100644 index 0000000000000000000000000000000000000000..02b88315537b2711f1f5b9c99ba28350c8cd93c1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_sve.c @@ -0,0 +1,13 @@ +#include <arm_sve.h> + +int accumulate(svint64_t a, svint64_t b) { + svbool_t p = svptrue_b64(); + return svaddv(p, svmla_z(p, a, a, b)); +} + +int main(void) +{ + svint64_t a = svdup_s64(1); + svint64_t b = svdup_s64(2); + return accumulate(a, b); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c new file mode 100644 index 0000000000000000000000000000000000000000..9064998af7405eef530febe1c26a6bad1fb5e4b2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c @@ -0,0 +1,21 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__)) + #define vsx_ld vec_vsx_ld + #define vsx_st vec_vsx_st +#else + #define vsx_ld vec_xl + #define vsx_st vec_xst +#endif + +int main(void) +{ + unsigned int zout[4]; + unsigned int z4[] = {0, 0, 0, 0}; + __vector unsigned int v_z4 = vsx_ld(0, z4); + vsx_st(v_z4, 0, zout); + return zout[0]; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c new file mode 100644 index 0000000000000000000000000000000000000000..006a1938b0bb7586ad1395198413e4715e37bf88 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c @@ -0,0 +1,13 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector unsigned long long v_uint64x2; + +int main(void) +{ + v_uint64x2 z2 = (v_uint64x2){0, 0}; + z2 = (v_uint64x2)vec_cmpeq(z2, z2); + return (int)vec_extract(z2, 0); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c new file mode 100644 index 0000000000000000000000000000000000000000..dd255f8f34beb3c01ce335a3ac852297bc3b9470 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c @@ -0,0
+1,13 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector unsigned int v_uint32x4; + +int main(void) +{ + v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0}; + z4 = vec_absd(z4, z4); + return (int)vec_extract(z4, 0); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx4.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx4.c new file mode 100644 index 0000000000000000000000000000000000000000..8760e71110073d308916c5f8858ff7fb95cde5d2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vsx4.c @@ -0,0 +1,14 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector unsigned int v_uint32x4; + +int main(void) +{ + v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16}; + v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2}; + v_uint32x4 v3 = vec_mod(v1, v2); + return (int)vec_extractm(v3); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vx.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vx.c new file mode 100644 index 0000000000000000000000000000000000000000..5fc45cdcfe4d9aa6a9a63f8f1b7a33ec1131f3aa --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vx.c @@ -0,0 +1,16 @@ +#if (__VEC__ < 10301) || (__ARCH__ < 11) + #error VX not supported +#endif + +#include <vecintrin.h> +int main(int argc, char **argv) +{ + __vector double x = vec_abs(vec_xl(argc, (double*)argv)); + __vector double y = vec_load_len((double*)argv, (unsigned int)argc); + + x = vec_round(vec_ceil(x) + vec_floor(y)); + __vector bool long long m = vec_cmpge(x, y); + __vector long long i = vec_signed(vec_sel(x, y, m)); + + return (int)vec_extract(i, 0); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vxe.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vxe.c new file mode 100644 index 0000000000000000000000000000000000000000..710182e8f5c3717389f6c509bb66e44dd4751aae --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vxe.c @@ -0,0 +1,25 @@ +#if (__VEC__ < 10302) || (__ARCH__ < 12) + #error VXE not supported +#endif + +#include <vecintrin.h> +int main(int argc, char **argv) +{ + __vector float x = vec_nabs(vec_xl(argc, (float*)argv)); + __vector float y = vec_load_len((float*)argv, (unsigned int)argc); + + x = vec_round(vec_ceil(x) + vec_floor(y)); + __vector bool int m = vec_cmpge(x, y); + x = vec_sel(x, y, m); + + // need to test the existence of intrin "vflls" since vec_doublee + // maps to the wrong intrin "vfll".
+ // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871 +#if defined(__GNUC__) && !defined(__clang__) + __vector long long i = vec_signed(__builtin_s390_vflls(x)); +#else + __vector long long i = vec_signed(vec_doublee(x)); +#endif + + return (int)vec_extract(i, 0); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vxe2.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vxe2.c new file mode 100644 index 0000000000000000000000000000000000000000..2e4a578ffebef71ad980a4951cee75d71ada74f6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_vxe2.c @@ -0,0 +1,21 @@ +#if (__VEC__ < 10303) || (__ARCH__ < 13) + #error VXE2 not supported +#endif + +#include <vecintrin.h> + +int main(int argc, char **argv) +{ + int val; + __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' }; + __vector signed short search = { 'g', 'h', 'g', 'o' }; + __vector unsigned char len = { 0 }; + __vector unsigned char res = vec_search_string_cc(large, search, len, &val); + __vector float x = vec_xl(argc, (float*)argv); + __vector int i = vec_signed(x); + + i = vec_srdb(vec_sldb(i, i, 2), i, 3); + val += (int)vec_extract(res, 1); + val += vec_extract(i, 0); + return val; +}
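The __VEC__/__ARCH__ gates in the three VX probes map one-to-one onto IBM Z generations; laid out as a table (the cpu column gives the usual machine names for those arch levels):

    # s390x vector facilities as gated by cpu_vx.c, cpu_vxe.c, cpu_vxe2.c
    ZARCH_FEATURES = {
        "VX":   {"arch": 11, "vec": 10301, "cpu": "z13"},
        "VXE":  {"arch": 12, "vec": 10302, "cpu": "z14"},
        "VXE2": {"arch": 13, "vec": 10303, "cpu": "z15"},
    }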
diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_xop.c b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_xop.c new file mode 100644 index 0000000000000000000000000000000000000000..5097ca30b2b5f71b9253cada27da230fbd724e6c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/cpu_xop.c @@ -0,0 +1,12 @@ +#include <immintrin.h> +#ifdef _MSC_VER + #include <ammintrin.h> +#else + #include <x86intrin.h> +#endif + +int main(void) +{ + __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128()); + return _mm_cvtsi128_si32(a); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c new file mode 100644 index 0000000000000000000000000000000000000000..121f574c1b7ddb4ef86d4ba2081a74c5a7c68309 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c @@ -0,0 +1,18 @@ +#include <immintrin.h> +/** + * Test BW mask operations due to: + * - MSVC has supported it since vs2019 see, + * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html + * - Clang >= v8.0 + * - GCC >= v7.1 + */ +int main(void) +{ + __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1)); + m64 = _kor_mask64(m64, m64); + m64 = _kxor_mask64(m64, m64); + m64 = _cvtu64_mask64(_cvtmask64_u64(m64)); + m64 = _mm512_kunpackd(m64, m64); + m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64); + return (int)_cvtmask64_u64(m64); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c new file mode 100644 index 0000000000000000000000000000000000000000..6e3c7a7c36642eb2a4ec30bfc61e51ff325d722e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c @@ -0,0 +1,16 @@ +#include <immintrin.h> +/** + * Test DQ mask operations due to: + * - MSVC has supported it since vs2019 see, + * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html + * - Clang >= v8.0 + * - GCC >= v7.1 + */ +int main(void) +{ + __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1)); + m8 = _kor_mask8(m8, m8); + m8 = _kxor_mask8(m8, m8); + m8 = _cvtu32_mask8(_cvtmask8_u32(m8)); + return (int)_cvtmask8_u32(m8); +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c new file mode 100644 index 0000000000000000000000000000000000000000..539b386ac4e74ef2401f11ee2585e5bd3e9d129f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c @@ -0,0 +1,41 @@ +#include <immintrin.h> +/** + * The following intrinsics don't have direct native support but compilers + * tend to emulate them. + * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19 + */ +int main(void) +{ + __m512 one_ps = _mm512_set1_ps(1.0f); + __m512d one_pd = _mm512_set1_pd(1.0); + __m512i one_i64 = _mm512_set1_epi64(1); + // add + float sum_ps = _mm512_reduce_add_ps(one_ps); + double sum_pd = _mm512_reduce_add_pd(one_pd); + int sum_int = (int)_mm512_reduce_add_epi64(one_i64); + sum_int += (int)_mm512_reduce_add_epi32(one_i64); + // mul + sum_ps += _mm512_reduce_mul_ps(one_ps); + sum_pd += _mm512_reduce_mul_pd(one_pd); + sum_int += (int)_mm512_reduce_mul_epi64(one_i64); + sum_int += (int)_mm512_reduce_mul_epi32(one_i64); + // min + sum_ps += _mm512_reduce_min_ps(one_ps); + sum_pd += _mm512_reduce_min_pd(one_pd); + sum_int += (int)_mm512_reduce_min_epi32(one_i64); + sum_int += (int)_mm512_reduce_min_epu32(one_i64); + sum_int += (int)_mm512_reduce_min_epi64(one_i64); + // max + sum_ps += _mm512_reduce_max_ps(one_ps); + sum_pd += _mm512_reduce_max_pd(one_pd); + sum_int += (int)_mm512_reduce_max_epi32(one_i64); + sum_int += (int)_mm512_reduce_max_epu32(one_i64); + sum_int += (int)_mm512_reduce_max_epi64(one_i64); + // and + sum_int += (int)_mm512_reduce_and_epi32(one_i64); + sum_int += (int)_mm512_reduce_and_epi64(one_i64); + // or + sum_int += (int)_mm512_reduce_or_epi32(one_i64); + sum_int += (int)_mm512_reduce_or_epi64(one_i64); + return (int)sum_ps + (int)sum_pd + sum_int; +}
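Unlike the cpu_* probes, the extra_* probes do not gate whole features: they check whether the toolchain additionally provides convenience intrinsics (mask operations, reductions) or sound inline-asm support on top of a feature that was already detected. A sketch of how such results could be recorded, reusing the hypothetical can_compile() helper from earlier (the macro names here are illustrative, not numpy's):

    cc = "gcc"
    extra_macros = []
    if can_compile(cc, ["-mavx512f"], "extra_avx512f_reduce.c"):
        extra_macros.append(("HAVE_AVX512F_REDUCE", 1))
    if can_compile(cc, ["-mavx512bw"], "extra_avx512bw_mask.c"):
        extra_macros.append(("HAVE_AVX512BW_MASK", 1))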
diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c new file mode 100644 index 0000000000000000000000000000000000000000..07ea24d7d8d1fa9885d3696271b7517fd3703b34 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c @@ -0,0 +1,12 @@ +/** + * Assembler may not fully support the following VSX3 scalar + * instructions, even though compilers report VSX3 support. + */ +int main(void) +{ + unsigned short bits = 0xFF; + double f; + __asm__ __volatile__("xscvhpdp %x0,%x1" : "=wa"(f) : "wa"(bits)); + __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits) : "wa" (f)); + return bits; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx4_mma.c b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx4_mma.c new file mode 100644 index 0000000000000000000000000000000000000000..b950e9de8609ae632f875187c7123d8e8c00304e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx4_mma.c @@ -0,0 +1,21 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +typedef __vector float fv4sf_t; +typedef __vector unsigned char vec_t; + +int main(void) +{ + __vector_quad acc0; + float a[4] = {0,1,2,3}; + float b[4] = {0,1,2,3}; + vec_t *va = (vec_t *) a; + vec_t *vb = (vec_t *) b; + __builtin_mma_xvf32ger(&acc0, va[0], vb[0]); + fv4sf_t result[4]; + __builtin_mma_disassemble_acc((void *)result, &acc0); + fv4sf_t c0 = result[0]; + return (int)((float*)&c0)[0]; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx_asm.c new file mode 100644 index 0000000000000000000000000000000000000000..2b44c7a7fb96f86ffd3653698da091bfc68f36cc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/extra_vsx_asm.c @@ -0,0 +1,36 @@ +/** + * Testing ASM VSX register number fixer '%x' + * + * old versions of CLANG don't support %x in the inline asm template + * which fixes register number when using any of the register constraints wa, wd, wf. + * + * xref: + * - https://bugs.llvm.org/show_bug.cgi?id=31837 + * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html + */ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include <altivec.h> + +#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__)) + #define vsx_ld vec_vsx_ld + #define vsx_st vec_vsx_st +#else + #define vsx_ld vec_xl + #define vsx_st vec_xst +#endif + +int main(void) +{ + float z4[] = {0, 0, 0, 0}; + signed int zout[] = {0, 0, 0, 0}; + + __vector float vz4 = vsx_ld(0, z4); + __vector signed int asm_ret = vsx_ld(0, zout); + + __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (asm_ret) : "wa" (vz4)); + + vsx_st(asm_ret, 0, zout); + return zout[0]; +} diff --git a/phivenv/Lib/site-packages/numpy/distutils/checks/test_flags.c b/phivenv/Lib/site-packages/numpy/distutils/checks/test_flags.c new file mode 100644 index 0000000000000000000000000000000000000000..2fcd23daf81229b35933db3f745fa6156aba7039 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/checks/test_flags.c @@ -0,0 +1 @@ +int test_flags; diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__init__.py b/phivenv/Lib/site-packages/numpy/distutils/command/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3af55f092e6f8b3b04bb2b9f4505e8cc3fff2308 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/__init__.py @@ -0,0 +1,35 @@ +"""distutils.command + +Package containing implementation of all the standard Distutils +commands.
+ +""" +def test_na_writable_attributes_deletion(): + a = np.NA(2) + attr = ['payload', 'dtype'] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + +__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" + +distutils_all = [ #'build_py', + 'clean', + 'install_clib', + 'install_scripts', + 'bdist', + 'bdist_dumb', + 'bdist_wininst', + ] + +__import__('distutils.command', globals(), locals(), distutils_all) + +__all__ = ['build', + 'config_compiler', + 'config', + 'build_src', + 'build_py', + 'build_ext', + 'build_clib', + 'build_scripts', + 'install', + 'install_data', + 'install_headers', + 'install_lib', + 'bdist_rpm', + 'sdist', + ] + distutils_all diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aaa787773ca8d123b83fd94a5f7fd538517c0b08 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/autodist.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/autodist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def71fedcb71c2e16a1c627d905bb84ce601179c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/autodist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..999fbd465560dc1ed5026064b78443c5e4a0d225 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e70f2dcce550080c5717831832840235f2d39139 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b308f620144ad17144605af6b39e8846f719c09 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..543b066cc0477c377346fbe3cf91988683e982a4 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_py.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_py.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edddaec09c7c740c942fb9f110ceebad7c647116 Binary files /dev/null and 
b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_py.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..311239c0fce77d8fdb0fbab569cc8cc1a306c7f0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_src.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_src.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24ef9b30c39f837aa2f366907a3337d1163de107 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/build_src.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/config.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c57b3bbad3b6c2f4e562335b9675e17bc6de45ba Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/config.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a588ff10bb34404c40d69bdff6b4df989d623408 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/develop.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/develop.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51515fb4185a7cb395b344fc0e3b47445e61db69 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/develop.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30c37848e64233d6bd117b631c875554e47682b7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cac14bc5abe14be7e4d3d5f20f0ca92cc70318c5 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2a7d91198f187cee8103bb34f3db050301e204e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_data.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_data.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd4f522aea0b3a7b3d6df37d5a0feaae6fd64674 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_data.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc8ad4685aca597884346beb401cd9c4a7a3a082 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/sdist.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/sdist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e697b70370d83419260d9294d2ff1ea0b08721a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/command/__pycache__/sdist.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/autodist.py b/phivenv/Lib/site-packages/numpy/distutils/command/autodist.py new file mode 100644 index 0000000000000000000000000000000000000000..815c86216d432daffeb47b8f71f19527c0c6b0d4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/autodist.py @@ -0,0 +1,148 @@ +"""This module implements additional tests ala autoconf which can be useful. + +""" +import textwrap + +# We put them here since they could be easily reused outside numpy.distutils + +def check_inline(cmd): + """Return the inline identifier (may be empty).""" + cmd._check_compiler() + body = textwrap.dedent(""" + #ifndef __cplusplus + static %(inline)s int static_func (void) + { + return 0; + } + %(inline)s int nostatic_func (void) + { + return 0; + } + #endif""") + + for kw in ['inline', '__inline__', '__inline']: + st = cmd.try_compile(body % {'inline': kw}, None, None) + if st: + return kw + + return '' + + +def check_restrict(cmd): + """Return the restrict identifier (may be empty).""" + cmd._check_compiler() + body = textwrap.dedent(""" + static int static_func (char * %(restrict)s a) + { + return 0; + } + """) + + for kw in ['restrict', '__restrict__', '__restrict']: + st = cmd.try_compile(body % {'restrict': kw}, None, None) + if st: + return kw + + return '' + + +def check_compiler_gcc(cmd): + """Check if the compiler is GCC.""" + + cmd._check_compiler() + body = textwrap.dedent(""" + int + main() + { + #if (! defined __GNUC__) + #error gcc required + #endif + return 0; + } + """) + return cmd.try_compile(body, None, None) + + +def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0): + """ + Check that the gcc version is at least the specified version.""" + + cmd._check_compiler() + version = '.'.join([str(major), str(minor), str(patchlevel)]) + body = textwrap.dedent(""" + int + main() + { + #if (! 
defined __GNUC__) || (__GNUC__ < %(major)d) || \\ + (__GNUC_MINOR__ < %(minor)d) || \\ + (__GNUC_PATCHLEVEL__ < %(patchlevel)d) + #error gcc >= %(version)s required + #endif + return 0; + } + """) + kw = {'version': version, 'major': major, 'minor': minor, + 'patchlevel': patchlevel} + + return cmd.try_compile(body % kw, None, None) + + +def check_gcc_function_attribute(cmd, attribute, name): + """Return True if the given function attribute is supported.""" + cmd._check_compiler() + body = textwrap.dedent(""" + #pragma GCC diagnostic error "-Wattributes" + #pragma clang diagnostic error "-Wattributes" + + int %s %s(void* unused) + { + return 0; + } + + int + main() + { + return 0; + } + """) % (attribute, name) + return cmd.try_compile(body, None, None) != 0 + + +def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, + include): + """Return True if the given function attribute is supported with + intrinsics.""" + cmd._check_compiler() + body = textwrap.dedent(""" + #include<%s> + int %s %s(void) + { + %s; + return 0; + } + + int + main() + { + return 0; + } + """) % (include, attribute, name, code) + return cmd.try_compile(body, None, None) != 0 + + +def check_gcc_variable_attribute(cmd, attribute): + """Return True if the given variable attribute is supported.""" + cmd._check_compiler() + body = textwrap.dedent(""" + #pragma GCC diagnostic error "-Wattributes" + #pragma clang diagnostic error "-Wattributes" + + int %s foo; + + int + main() + { + return 0; + } + """) % (attribute, ) + return cmd.try_compile(body, None, None) != 0
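All of these helpers take the distutils config command object, whose try_compile they drive; a setup script would typically call them while generating a config header. A hedged sketch of such a caller (configure() and the emitted defines are illustrative, not numpy API):

    from numpy.distutils.command.autodist import check_inline, check_restrict

    def configure(config_cmd):
        """Collect #define fallbacks for the compiler under test."""
        defines = []
        inline = check_inline(config_cmd)      # '', 'inline', '__inline__', ...
        if inline and inline != 'inline':
            defines.append(('inline', inline))
        restrict = check_restrict(config_cmd)  # '', 'restrict', ...
        if restrict and restrict != 'restrict':
            defines.append(('restrict', restrict))
        return defines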
diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/bdist_rpm.py b/phivenv/Lib/site-packages/numpy/distutils/command/bdist_rpm.py new file mode 100644 index 0000000000000000000000000000000000000000..341e38a9d5c580a30098cfbc9cf5c388b1eccc3e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/bdist_rpm.py @@ -0,0 +1,22 @@ +import os +import sys +if 'setuptools' in sys.modules: + from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm +else: + from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm + +class bdist_rpm(old_bdist_rpm): + + def _make_spec_file(self): + spec_file = old_bdist_rpm._make_spec_file(self) + + # Replace hardcoded setup.py script name + # with the real setup script name. + setup_py = os.path.basename(sys.argv[0]) + if setup_py == 'setup.py': + return spec_file + new_spec_file = [] + for line in spec_file: + line = line.replace('setup.py', setup_py) + new_spec_file.append(line) + return new_spec_file diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/build.py b/phivenv/Lib/site-packages/numpy/distutils/command/build.py new file mode 100644 index 0000000000000000000000000000000000000000..5b062599816f47b996387b326935fc1e20d7cf55 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/build.py @@ -0,0 +1,62 @@ +import os +import sys +from distutils.command.build import build as old_build +from distutils.util import get_platform +from numpy.distutils.command.config_compiler import show_fortran_compilers + +class build(old_build): + + sub_commands = [('config_cc', lambda *args: True), + ('config_fc', lambda *args: True), + ('build_src', old_build.has_ext_modules), + ] + old_build.sub_commands + + user_options = old_build.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), + ('cpu-baseline=', None, + "specify a list of enabled baseline CPU optimizations"), + ('cpu-dispatch=', None, + "specify a list of dispatched CPU optimizations"), + ('disable-optimization', None, + "disable CPU optimized code(dispatch,simd,fast...)"), + ('simd-test=', None, + "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), + ] + + help_options = old_build.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + def initialize_options(self): + old_build.initialize_options(self) + self.fcompiler = None + self.warn_error = False + self.cpu_baseline = "min" + self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default + self.disable_optimization = False + """ + the '_simd' module is very large. Adding more dispatched features + will increase binary size and compile time. By default we minimize + the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), + NOTE: any specified features will be ignored if they're: + - part of the baseline(--cpu-baseline) + - not part of dispatch-able features(--cpu-dispatch) + - not supported by compiler or platform + """ + self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \ + "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2" + + def finalize_options(self): + build_scripts = self.build_scripts + old_build.finalize_options(self) + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) + if build_scripts is None: + self.build_scripts = os.path.join(self.build_base, + 'scripts' + plat_specifier) + + def run(self): + old_build.run(self) diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/build_clib.py b/phivenv/Lib/site-packages/numpy/distutils/command/build_clib.py new file mode 100644 index 0000000000000000000000000000000000000000..6bb522b80ca98f62c7a0f29bdaad72e4c19e37ee --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/build_clib.py @@ -0,0 +1,469 @@ +""" Modified version of build_clib that handles fortran source files.
+""" +import os +from glob import glob +import shutil +from distutils.command.build_clib import build_clib as old_build_clib +from distutils.errors import DistutilsSetupError, DistutilsError, \ + DistutilsFileError + +from numpy.distutils import log +from distutils.dep_util import newer_group +from numpy.distutils.misc_util import ( + filter_sources, get_lib_source_files, get_numpy_include_dirs, + has_cxx_sources, has_f_sources, is_sequence +) +from numpy.distutils.ccompiler_opt import new_ccompiler_opt + +# Fix Python distutils bug sf #1718574: +_l = old_build_clib.user_options +for _i in range(len(_l)): + if _l[_i][0] in ['build-clib', 'build-temp']: + _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] +# + + +class build_clib(old_build_clib): + + description = "build C/C++/F libraries used by Python extensions" + + user_options = old_build_clib.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('inplace', 'i', 'Build in-place'), + ('parallel=', 'j', + "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), + ('cpu-baseline=', None, + "specify a list of enabled baseline CPU optimizations"), + ('cpu-dispatch=', None, + "specify a list of dispatched CPU optimizations"), + ('disable-optimization', None, + "disable CPU optimized code(dispatch,simd,fast...)"), + ] + + boolean_options = old_build_clib.boolean_options + \ + ['inplace', 'warn-error', 'disable-optimization'] + + def initialize_options(self): + old_build_clib.initialize_options(self) + self.fcompiler = None + self.inplace = 0 + self.parallel = None + self.warn_error = None + self.cpu_baseline = None + self.cpu_dispatch = None + self.disable_optimization = None + + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError as e: + raise ValueError("--parallel/-j argument must be an integer") from e + old_build_clib.finalize_options(self) + self.set_undefined_options('build', + ('parallel', 'parallel'), + ('warn_error', 'warn_error'), + ('cpu_baseline', 'cpu_baseline'), + ('cpu_dispatch', 'cpu_dispatch'), + ('disable_optimization', 'disable_optimization') + ) + + def have_f_sources(self): + for (lib_name, build_info) in self.libraries: + if has_f_sources(build_info.get('sources', [])): + return True + return False + + def have_cxx_sources(self): + for (lib_name, build_info) in self.libraries: + if has_cxx_sources(build_info.get('sources', [])): + return True + return False + + def run(self): + if not self.libraries: + return + + # Make sure that library sources are complete. + languages = [] + + # Make sure that extension sources are complete. 
+ self.run_command('build_src') + + for (lib_name, build_info) in self.libraries: + l = build_info.get('language', None) + if l and l not in languages: + languages.append(l) + + from distutils.ccompiler import new_compiler + self.compiler = new_compiler(compiler=self.compiler, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution, + need_cxx=self.have_cxx_sources()) + + if self.warn_error: + self.compiler.compiler.append('-Werror') + self.compiler.compiler_so.append('-Werror') + + libraries = self.libraries + self.libraries = None + self.compiler.customize_cmd(self) + self.libraries = libraries + + self.compiler.show_customization() + + if not self.disable_optimization: + dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") + dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) + opt_cache_path = os.path.abspath( + os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') + ) + if hasattr(self, "compiler_opt"): + # By default `CCompilerOpt` update the cache at the exit of + # the process, which may lead to duplicate building + # (see build_extension()/force_rebuild) if run() called + # multiple times within the same os process/thread without + # giving the chance the previous instances of `CCompilerOpt` + # to update the cache. + self.compiler_opt.cache_flush() + + self.compiler_opt = new_ccompiler_opt( + compiler=self.compiler, dispatch_hpath=dispatch_hpath, + cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, + cache_path=opt_cache_path + ) + def report(copt): + log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") + log.info(copt.report(full=True)) + + import atexit + atexit.register(report, self.compiler_opt) + + if self.have_f_sources(): + from numpy.distutils.fcompiler import new_fcompiler + self._f_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90='f90' in languages, + c_compiler=self.compiler) + if self._f_compiler is not None: + self._f_compiler.customize(self.distribution) + + libraries = self.libraries + self.libraries = None + self._f_compiler.customize_cmd(self) + self.libraries = libraries + + self._f_compiler.show_customization() + else: + self._f_compiler = None + + self.build_libraries(self.libraries) + + if self.inplace: + for l in self.distribution.installed_libraries: + libname = self.compiler.library_filename(l.name) + source = os.path.join(self.build_clib, libname) + target = os.path.join(l.target_dir, libname) + self.mkpath(l.target_dir) + shutil.copy(source, target) + + def get_source_files(self): + self.check_library_list(self.libraries) + filenames = [] + for lib in self.libraries: + filenames.extend(get_lib_source_files(lib)) + return filenames + + def build_libraries(self, libraries): + for (lib_name, build_info) in libraries: + self.build_a_library(build_info, lib_name, libraries) + + def assemble_flags(self, in_flags): + """ Assemble flags from flag list + + Parameters + ---------- + in_flags : None or sequence + None corresponds to empty list. Sequence elements can be strings + or callables that return lists of strings. Callable takes `self` as + single parameter. 
+ + Returns + ------- + out_flags : list + """ + if in_flags is None: + return [] + out_flags = [] + for in_flag in in_flags: + if callable(in_flag): + out_flags += in_flag(self) + else: + out_flags.append(in_flag) + return out_flags + + def build_a_library(self, build_info, lib_name, libraries): + # default compilers + compiler = self.compiler + fcompiler = self._f_compiler + + sources = build_info.get('sources') + if sources is None or not is_sequence(sources): + raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % lib_name) + sources = list(sources) + + c_sources, cxx_sources, f_sources, fmodule_sources \ + = filter_sources(sources) + requiref90 = not not fmodule_sources or \ + build_info.get('language', 'c') == 'f90' + + # save source type information so that build_ext can use it. + source_languages = [] + if c_sources: + source_languages.append('c') + if cxx_sources: + source_languages.append('c++') + if requiref90: + source_languages.append('f90') + elif f_sources: + source_languages.append('f77') + build_info['source_languages'] = source_languages + + lib_file = compiler.library_filename(lib_name, + output_dir=self.build_clib) + depends = sources + build_info.get('depends', []) + + force_rebuild = self.force + if not self.disable_optimization and not self.compiler_opt.is_cached(): + log.debug("Detected changes on compiler optimizations") + force_rebuild = True + if not (force_rebuild or newer_group(depends, lib_file, 'newer')): + log.debug("skipping '%s' library (up-to-date)", lib_name) + return + else: + log.info("building '%s' library", lib_name) + + config_fc = build_info.get('config_fc', {}) + if fcompiler is not None and config_fc: + log.info('using additional config_fc from setup script ' + 'for fortran compiler: %s' + % (config_fc,)) + from numpy.distutils.fcompiler import new_fcompiler + fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=requiref90, + c_compiler=self.compiler) + if fcompiler is not None: + dist = self.distribution + base_config_fc = dist.get_option_dict('config_fc').copy() + base_config_fc.update(config_fc) + fcompiler.customize(base_config_fc) + + # check availability of Fortran compilers + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("library %s has Fortran sources" + " but no Fortran compiler found" % (lib_name)) + + if fcompiler is not None: + fcompiler.extra_f77_compile_args = build_info.get( + 'extra_f77_compile_args') or [] + fcompiler.extra_f90_compile_args = build_info.get( + 'extra_f90_compile_args') or [] + + macros = build_info.get('macros') + if macros is None: + macros = [] + include_dirs = build_info.get('include_dirs') + if include_dirs is None: + include_dirs = [] + # Flags can be strings, or callables that return a list of strings. 
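+ # (each of the three flag sets below may mix plain strings and
+ # callables; assemble_flags() resolves them to flat lists before use)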
+ extra_postargs = self.assemble_flags(
+ build_info.get('extra_compiler_args'))
+ extra_cflags = self.assemble_flags(
+ build_info.get('extra_cflags'))
+ extra_cxxflags = self.assemble_flags(
+ build_info.get('extra_cxxflags'))
+
+ include_dirs.extend(get_numpy_include_dirs())
+ # where compiled F90 module files are:
+ module_dirs = build_info.get('module_dirs') or []
+ module_build_dir = os.path.dirname(lib_file)
+ if requiref90:
+ self.mkpath(module_build_dir)
+
+ if compiler.compiler_type == 'msvc':
+ # this hack works around the msvc compiler attributes
+ # problem, msvc uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+ extra_cflags += extra_cxxflags
+
+ # filtering C dispatch-table sources when optimization is not disabled,
+ # otherwise treated as normal sources.
+ copt_c_sources = []
+ copt_cxx_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
+ if not self.disable_optimization:
+ bsrc_dir = self.get_finalized_command("build_src").build_src
+ dispatch_hpath = os.path.join("numpy", "distutils", "include")
+ dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+ include_dirs.append(dispatch_hpath)
+ # copt_build_src = None if self.inplace else bsrc_dir
+ copt_build_src = bsrc_dir
+ for _srcs, _dst, _ext in (
+ ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+ ((c_sources, cxx_sources), copt_cxx_sources,
+ ('.dispatch.cpp', '.dispatch.cxx'))
+ ):
+ for _src in _srcs:
+ _dst += [
+ _src.pop(_src.index(s))
+ for s in _src[:] if s.endswith(_ext)
+ ]
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+ else:
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+ objects = []
+ if copt_cxx_sources:
+ log.info("compiling C++ dispatch-able sources")
+ # the C++ dispatch sources need the C++ variant of the compiler;
+ # construct it here so the try_dispatch call below can use it
+ cxx_compiler = compiler.cxx_compiler()
+ objects += self.compiler_opt.try_dispatch(
+ copt_cxx_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cxxflags,
+ ccompiler=cxx_compiler
+ )
+
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cflags)
+
+ if c_sources:
+ log.info("compiling C sources")
+ objects += compiler.compile(
+ c_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs
+ + copt_baseline_flags
+ + extra_cflags))
+
+ if cxx_sources:
+ log.info("compiling C++ sources")
+ cxx_compiler = compiler.cxx_compiler()
+ cxx_objects = cxx_compiler.compile(
+ cxx_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs
+ + copt_baseline_flags
+ + extra_cxxflags))
+ objects.extend(cxx_objects)
+
+ if f_sources or fmodule_sources:
+ extra_postargs = []
+ f_objects = []
+
+ if requiref90:
+ if fcompiler.module_dir_switch is None:
+ existing_modules = glob('*.mod')
+ extra_postargs += fcompiler.module_options(
+ module_dirs, module_build_dir)
+
+ if fmodule_sources:
+ log.info("compiling Fortran 90 module sources")
+ f_objects += fcompiler.compile(fmodule_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+
+ if requiref90 and self._f_compiler.module_dir_switch is None:
+ # move
new compiled F90 module files to module_build_dir + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f) == os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' + % (f, module_build_dir)) + + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + else: + f_objects = [] + + if f_objects and not fcompiler.can_ccompiler_link(compiler): + # Default linker cannot link Fortran object files, and results + # need to be wrapped later. Instead of creating a real static + # library, just keep track of the object files. + listfn = os.path.join(self.build_clib, + lib_name + '.fobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) + + listfn = os.path.join(self.build_clib, + lib_name + '.cobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in objects)) + + # create empty "library" file for dependency tracking + lib_fname = os.path.join(self.build_clib, + lib_name + compiler.static_lib_extension) + with open(lib_fname, 'wb') as f: + pass + else: + # assume that default linker is suitable for + # linking Fortran object files + objects.extend(f_objects) + compiler.create_static_lib(objects, lib_name, + output_dir=self.build_clib, + debug=self.debug) + + # fix library dependencies + clib_libraries = build_info.get('libraries', []) + for lname, binfo in libraries: + if lname in clib_libraries: + clib_libraries.extend(binfo.get('libraries', [])) + if clib_libraries: + build_info['libraries'] = clib_libraries diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/build_ext.py b/phivenv/Lib/site-packages/numpy/distutils/command/build_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..1087f7c2014b446428c22d915f51d251f685ff0f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/build_ext.py @@ -0,0 +1,752 @@ +""" Modified version of build_ext that handles fortran source files. 
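+
+ It also adds --fcompiler, --parallel/-j and CPU baseline/dispatch
+ options, and chooses a linker based on the languages of the sources.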
+ +""" +import os +import subprocess +from glob import glob + +from distutils.dep_util import newer_group +from distutils.command.build_ext import build_ext as old_build_ext +from distutils.errors import DistutilsFileError, DistutilsSetupError,\ + DistutilsError +from distutils.file_util import copy_file + +from numpy.distutils import log +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.system_info import combine_paths +from numpy.distutils.misc_util import ( + filter_sources, get_ext_source_files, get_numpy_include_dirs, + has_cxx_sources, has_f_sources, is_sequence +) +from numpy.distutils.command.config_compiler import show_fortran_compilers +from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt + +class build_ext (old_build_ext): + + description = "build C/C++/F extensions (compile/link to build directory)" + + user_options = old_build_ext.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('parallel=', 'j', + "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), + ('cpu-baseline=', None, + "specify a list of enabled baseline CPU optimizations"), + ('cpu-dispatch=', None, + "specify a list of dispatched CPU optimizations"), + ('disable-optimization', None, + "disable CPU optimized code(dispatch,simd,fast...)"), + ('simd-test=', None, + "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), + ] + + help_options = old_build_ext.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] + + def initialize_options(self): + old_build_ext.initialize_options(self) + self.fcompiler = None + self.parallel = None + self.warn_error = None + self.cpu_baseline = None + self.cpu_dispatch = None + self.disable_optimization = None + self.simd_test = None + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError as e: + raise ValueError("--parallel/-j argument must be an integer") from e + + # Ensure that self.include_dirs and self.distribution.include_dirs + # refer to the same list object. finalize_options will modify + # self.include_dirs, but self.distribution.include_dirs is used + # during the actual build. + # self.include_dirs is None unless paths are specified with + # --include-dirs. + # The include paths will be passed to the compiler in the order: + # numpy paths, --include-dirs paths, Python include path. + if isinstance(self.include_dirs, str): + self.include_dirs = self.include_dirs.split(os.pathsep) + incl_dirs = self.include_dirs or [] + if self.distribution.include_dirs is None: + self.distribution.include_dirs = [] + self.include_dirs = self.distribution.include_dirs + self.include_dirs.extend(incl_dirs) + + old_build_ext.finalize_options(self) + self.set_undefined_options('build', + ('parallel', 'parallel'), + ('warn_error', 'warn_error'), + ('cpu_baseline', 'cpu_baseline'), + ('cpu_dispatch', 'cpu_dispatch'), + ('disable_optimization', 'disable_optimization'), + ('simd_test', 'simd_test') + ) + CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test + + def run(self): + if not self.extensions: + return + + # Make sure that extension sources are complete. 
+ self.run_command('build_src') + + if self.distribution.has_c_libraries(): + if self.inplace: + if self.distribution.have_run.get('build_clib'): + log.warn('build_clib already run, it is too late to ' + 'ensure in-place build of build_clib') + build_clib = self.distribution.get_command_obj( + 'build_clib') + else: + build_clib = self.distribution.get_command_obj( + 'build_clib') + build_clib.inplace = 1 + build_clib.ensure_finalized() + build_clib.run() + self.distribution.have_run['build_clib'] = 1 + + else: + self.run_command('build_clib') + build_clib = self.get_finalized_command('build_clib') + self.library_dirs.append(build_clib.build_clib) + else: + build_clib = None + + # Not including C libraries to the list of + # extension libraries automatically to prevent + # bogus linking commands. Extensions must + # explicitly specify the C libraries that they use. + + from distutils.ccompiler import new_compiler + from numpy.distutils.fcompiler import new_fcompiler + + compiler_type = self.compiler + # Initialize C compiler: + self.compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution) + self.compiler.customize_cmd(self) + + if self.warn_error: + self.compiler.compiler.append('-Werror') + self.compiler.compiler_so.append('-Werror') + + self.compiler.show_customization() + + if not self.disable_optimization: + dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") + dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) + opt_cache_path = os.path.abspath( + os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') + ) + if hasattr(self, "compiler_opt"): + # By default `CCompilerOpt` update the cache at the exit of + # the process, which may lead to duplicate building + # (see build_extension()/force_rebuild) if run() called + # multiple times within the same os process/thread without + # giving the chance the previous instances of `CCompilerOpt` + # to update the cache. + self.compiler_opt.cache_flush() + + self.compiler_opt = new_ccompiler_opt( + compiler=self.compiler, dispatch_hpath=dispatch_hpath, + cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, + cache_path=opt_cache_path + ) + def report(copt): + log.info("\n########### EXT COMPILER OPTIMIZATION ###########") + log.info(copt.report(full=True)) + + import atexit + atexit.register(report, self.compiler_opt) + + # Setup directory for storing generated extra DLL files on Windows + self.extra_dll_dir = os.path.join(self.build_temp, '.libs') + if not os.path.isdir(self.extra_dll_dir): + os.makedirs(self.extra_dll_dir) + + # Create mapping of libraries built by build_clib: + clibs = {} + if build_clib is not None: + for libname, build_info in build_clib.libraries or []: + if libname in clibs and clibs[libname] != build_info: + log.warn('library %r defined more than once,' + ' overwriting build_info\n%s... \nwith\n%s...' + % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) + clibs[libname] = build_info + # .. and distribution libraries: + for libname, build_info in self.distribution.libraries or []: + if libname in clibs: + # build_clib libraries have a precedence before distribution ones + continue + clibs[libname] = build_info + + # Determine if C++/Fortran 77/Fortran 90 compilers are needed. + # Update extension libraries, library_dirs, and macros. 
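+ # (a single pass over all extensions records the union of source
+ # languages, so each required compiler is initialized exactly once)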
+ all_languages = set() + for ext in self.extensions: + ext_languages = set() + c_libs = [] + c_lib_dirs = [] + macros = [] + for libname in ext.libraries: + if libname in clibs: + binfo = clibs[libname] + c_libs += binfo.get('libraries', []) + c_lib_dirs += binfo.get('library_dirs', []) + for m in binfo.get('macros', []): + if m not in macros: + macros.append(m) + + for l in clibs.get(libname, {}).get('source_languages', []): + ext_languages.add(l) + if c_libs: + new_c_libs = ext.libraries + c_libs + log.info('updating extension %r libraries from %r to %r' + % (ext.name, ext.libraries, new_c_libs)) + ext.libraries = new_c_libs + ext.library_dirs = ext.library_dirs + c_lib_dirs + if macros: + log.info('extending extension %r defined_macros with %r' + % (ext.name, macros)) + ext.define_macros = ext.define_macros + macros + + # determine extension languages + if has_f_sources(ext.sources): + ext_languages.add('f77') + if has_cxx_sources(ext.sources): + ext_languages.add('c++') + l = ext.language or self.compiler.detect_language(ext.sources) + if l: + ext_languages.add(l) + + # reset language attribute for choosing proper linker + # + # When we build extensions with multiple languages, we have to + # choose a linker. The rules here are: + # 1. if there is Fortran code, always prefer the Fortran linker, + # 2. otherwise prefer C++ over C, + # 3. Users can force a particular linker by using + # `language='c'` # or 'c++', 'f90', 'f77' + # in their config.add_extension() calls. + if 'c++' in ext_languages: + ext_language = 'c++' + else: + ext_language = 'c' # default + + has_fortran = False + if 'f90' in ext_languages: + ext_language = 'f90' + has_fortran = True + elif 'f77' in ext_languages: + ext_language = 'f77' + has_fortran = True + + if not ext.language or has_fortran: + if l and l != ext_language and ext.language: + log.warn('resetting extension %r language from %r to %r.' % + (ext.name, l, ext_language)) + + ext.language = ext_language + + # global language + all_languages.update(ext_languages) + + need_f90_compiler = 'f90' in all_languages + need_f77_compiler = 'f77' in all_languages + need_cxx_compiler = 'c++' in all_languages + + # Initialize C++ compiler: + if need_cxx_compiler: + self._cxx_compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + compiler = self._cxx_compiler + compiler.customize(self.distribution, need_cxx=need_cxx_compiler) + compiler.customize_cmd(self) + compiler.show_customization() + self._cxx_compiler = compiler.cxx_compiler() + else: + self._cxx_compiler = None + + # Initialize Fortran 77 compiler: + if need_f77_compiler: + ctype = self.fcompiler + self._f77_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=False, + c_compiler=self.compiler) + fcompiler = self._f77_compiler + if fcompiler: + ctype = fcompiler.compiler_type + fcompiler.customize(self.distribution) + if fcompiler and fcompiler.get_version(): + fcompiler.customize_cmd(self) + fcompiler.show_customization() + else: + self.warn('f77_compiler=%s is not available.' 
% + (ctype)) + self._f77_compiler = None + else: + self._f77_compiler = None + + # Initialize Fortran 90 compiler: + if need_f90_compiler: + ctype = self.fcompiler + self._f90_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=True, + c_compiler=self.compiler) + fcompiler = self._f90_compiler + if fcompiler: + ctype = fcompiler.compiler_type + fcompiler.customize(self.distribution) + if fcompiler and fcompiler.get_version(): + fcompiler.customize_cmd(self) + fcompiler.show_customization() + else: + self.warn('f90_compiler=%s is not available.' % + (ctype)) + self._f90_compiler = None + else: + self._f90_compiler = None + + # Build extensions + self.build_extensions() + + # Copy over any extra DLL files + # FIXME: In the case where there are more than two packages, + # we blindly assume that both packages need all of the libraries, + # resulting in a larger wheel than is required. This should be fixed, + # but it's so rare that I won't bother to handle it. + pkg_roots = { + self.get_ext_fullname(ext.name).split('.')[0] + for ext in self.extensions + } + for pkg_root in pkg_roots: + shared_lib_dir = os.path.join(pkg_root, '.libs') + if not self.inplace: + shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) + for fn in os.listdir(self.extra_dll_dir): + if not os.path.isdir(shared_lib_dir): + os.makedirs(shared_lib_dir) + if not fn.lower().endswith('.dll'): + continue + runtime_lib = os.path.join(self.extra_dll_dir, fn) + copy_file(runtime_lib, shared_lib_dir) + + def swig_sources(self, sources, extensions=None): + # Do nothing. Swig sources have been handled in build_src command. + return sources + + def build_extension(self, ext): + sources = ext.sources + if sources is None or not is_sequence(sources): + raise DistutilsSetupError( + ("in 'ext_modules' option (extension '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % ext.name) + sources = list(sources) + + if not sources: + return + + fullname = self.get_ext_fullname(ext.name) + if self.inplace: + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + base = modpath[-1] + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + ext_filename = os.path.join(package_dir, + self.get_ext_filename(base)) + else: + ext_filename = os.path.join(self.build_lib, + self.get_ext_filename(fullname)) + depends = sources + ext.depends + + force_rebuild = self.force + if not self.disable_optimization and not self.compiler_opt.is_cached(): + log.debug("Detected changes on compiler optimizations") + force_rebuild = True + if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): + log.debug("skipping '%s' extension (up-to-date)", ext.name) + return + else: + log.info("building '%s' extension", ext.name) + + extra_args = ext.extra_compile_args or [] + extra_cflags = getattr(ext, 'extra_c_compile_args', None) or [] + extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or [] + + macros = ext.define_macros[:] + for undef in ext.undef_macros: + macros.append((undef,)) + + c_sources, cxx_sources, f_sources, fmodule_sources = \ + filter_sources(ext.sources) + + if self.compiler.compiler_type == 'msvc': + if cxx_sources: + # Needed to compile kiva.agg._agg extension. 
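+ # (/Zm1000 raises MSVC's internal compiler memory allocation limit
+ # so large C++ translation units can build)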
+ extra_args.append('/Zm1000') + extra_cflags += extra_cxxflags + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + # Set Fortran/C++ compilers for compilation and linking. + if ext.language == 'f90': + fcompiler = self._f90_compiler + elif ext.language == 'f77': + fcompiler = self._f77_compiler + else: # in case ext.language is c++, for instance + fcompiler = self._f90_compiler or self._f77_compiler + if fcompiler is not None: + fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( + ext, 'extra_f77_compile_args') else [] + fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( + ext, 'extra_f90_compile_args') else [] + cxx_compiler = self._cxx_compiler + + # check for the availability of required compilers + if cxx_sources and cxx_compiler is None: + raise DistutilsError("extension %r has C++ sources" + "but no C++ compiler found" % (ext.name)) + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("extension %r has Fortran sources " + "but no Fortran compiler found" % (ext.name)) + if ext.language in ['f77', 'f90'] and fcompiler is None: + self.warn("extension %r has Fortran libraries " + "but no Fortran linker found, using default linker" % (ext.name)) + if ext.language == 'c++' and cxx_compiler is None: + self.warn("extension %r has C++ libraries " + "but no C++ linker found, using default linker" % (ext.name)) + + kws = {'depends': ext.depends} + output_dir = self.build_temp + + include_dirs = ext.include_dirs + get_numpy_include_dirs() + + # filtering C dispatch-table sources when optimization is not disabled, + # otherwise treated as normal sources. + copt_c_sources = [] + copt_cxx_sources = [] + copt_baseline_flags = [] + copt_macros = [] + if not self.disable_optimization: + bsrc_dir = self.get_finalized_command("build_src").build_src + dispatch_hpath = os.path.join("numpy", "distutils", "include") + dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) + include_dirs.append(dispatch_hpath) + + # copt_build_src = None if self.inplace else bsrc_dir + # Always generate the generated config files and + # dispatch-able sources inside the build directory, + # even if the build option `inplace` is enabled. + # This approach prevents conflicts with Meson-generated + # config headers. Since `spin build --clean` will not remove + # these headers, they might overwrite the generated Meson headers, + # causing compatibility issues. Maintaining separate directories + # ensures compatibility between distutils dispatch config headers + # and Meson headers, avoiding build disruptions. + # See gh-24450 for more details. 
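+ # dispatch-able sources are selected purely by suffix: *.dispatch.c,
+ # *.dispatch.cpp and *.dispatch.cxx are compiled via CCompilerOpt
+ # rather than the plain compiler.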
+ copt_build_src = bsrc_dir + for _srcs, _dst, _ext in ( + ((c_sources,), copt_c_sources, ('.dispatch.c',)), + ((c_sources, cxx_sources), copt_cxx_sources, + ('.dispatch.cpp', '.dispatch.cxx')) + ): + for _src in _srcs: + _dst += [ + _src.pop(_src.index(s)) + for s in _src[:] if s.endswith(_ext) + ] + copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() + else: + copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) + + c_objects = [] + if copt_cxx_sources: + log.info("compiling C++ dispatch-able sources") + c_objects += self.compiler_opt.try_dispatch( + copt_cxx_sources, + output_dir=output_dir, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args + extra_cxxflags, + ccompiler=cxx_compiler, + **kws + ) + if copt_c_sources: + log.info("compiling C dispatch-able sources") + c_objects += self.compiler_opt.try_dispatch( + copt_c_sources, + output_dir=output_dir, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args + extra_cflags, + **kws) + if c_sources: + log.info("compiling C sources") + c_objects += self.compiler.compile( + c_sources, + output_dir=output_dir, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=(extra_args + copt_baseline_flags + + extra_cflags), + **kws) + if cxx_sources: + log.info("compiling C++ sources") + c_objects += cxx_compiler.compile( + cxx_sources, + output_dir=output_dir, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=(extra_args + copt_baseline_flags + + extra_cxxflags), + **kws) + + extra_postargs = [] + f_objects = [] + if fmodule_sources: + log.info("compiling Fortran 90 module sources") + module_dirs = ext.module_dirs[:] + module_build_dir = os.path.join( + self.build_temp, os.path.dirname( + self.get_ext_filename(fullname))) + + self.mkpath(module_build_dir) + if fcompiler.module_dir_switch is None: + existing_modules = glob('*.mod') + extra_postargs += fcompiler.module_options( + module_dirs, module_build_dir) + f_objects += fcompiler.compile(fmodule_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if fcompiler.module_dir_switch is None: + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f) == os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' % + (f, module_build_dir)) + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if f_objects and not fcompiler.can_ccompiler_link(self.compiler): + unlinkable_fobjects = f_objects + objects = c_objects + else: + unlinkable_fobjects = [] + objects = c_objects + f_objects + + if ext.extra_objects: + objects.extend(ext.extra_objects) + extra_args = ext.extra_link_args or [] + libraries = self.get_libraries(ext)[:] + library_dirs = ext.library_dirs[:] + + linker = self.compiler.link_shared_object + # Always use system linker when using MSVC compiler. 
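+ # (MSVC and the Windows Intel compilers cannot drive the Fortran
+ # linker; Fortran runtime libraries are folded into the MSVC link
+ # by _libs_with_msvc_and_fortran() instead)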
+ if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): + # expand libraries with fcompiler libraries as we are + # not using fcompiler linker + self._libs_with_msvc_and_fortran( + fcompiler, libraries, library_dirs) + if ext.runtime_library_dirs: + # gcc adds RPATH to the link. On windows, copy the dll into + # self.extra_dll_dir instead. + for d in ext.runtime_library_dirs: + for f in glob(d + '/*.dll'): + copy_file(f, self.extra_dll_dir) + ext.runtime_library_dirs = [] + + elif ext.language in ['f77', 'f90'] and fcompiler is not None: + linker = fcompiler.link_shared_object + if ext.language == 'c++' and cxx_compiler is not None: + linker = cxx_compiler.link_shared_object + + if fcompiler is not None: + objects, libraries = self._process_unlinkable_fobjects( + objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects) + + linker(objects, ext_filename, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=ext.runtime_library_dirs, + extra_postargs=extra_args, + export_symbols=self.get_export_symbols(ext), + debug=self.debug, + build_temp=self.build_temp, + target_lang=ext.language) + + def _add_dummy_mingwex_sym(self, c_sources): + build_src = self.get_finalized_command("build_src").build_src + build_clib = self.get_finalized_command("build_clib").build_clib + objects = self.compiler.compile([os.path.join(build_src, + "gfortran_vs2003_hack.c")], + output_dir=self.build_temp) + self.compiler.create_static_lib( + objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) + + def _process_unlinkable_fobjects(self, objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects): + libraries = list(libraries) + objects = list(objects) + unlinkable_fobjects = list(unlinkable_fobjects) + + # Expand possible fake static libraries to objects; + # make sure to iterate over a copy of the list as + # "fake" libraries will be removed as they are + # encountered + for lib in libraries[:]: + for libdir in library_dirs: + fake_lib = os.path.join(libdir, lib + '.fobjects') + if os.path.isfile(fake_lib): + # Replace fake static library + libraries.remove(lib) + with open(fake_lib) as f: + unlinkable_fobjects.extend(f.read().splitlines()) + + # Expand C objects + c_lib = os.path.join(libdir, lib + '.cobjects') + with open(c_lib) as f: + objects.extend(f.read().splitlines()) + + # Wrap unlinkable objects to a linkable one + if unlinkable_fobjects: + fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] + wrapped = fcompiler.wrap_unlinkable_objects( + fobjects, output_dir=self.build_temp, + extra_dll_dir=self.extra_dll_dir) + objects.extend(wrapped) + + return objects, libraries + + def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, + c_library_dirs): + if fcompiler is None: + return + + for libname in c_libraries: + if libname.startswith('msvc'): + continue + fileexists = False + for libdir in c_library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: + continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in c_library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(self.build_temp, libname + '.lib') + copy_file(libfile, libfile2) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + fileexists = True + break + if 
fileexists: + continue + log.warn('could not find library %r in directories %s' + % (libname, c_library_dirs)) + + # Always use system linker when using MSVC compiler. + f_lib_dirs = [] + for dir in fcompiler.library_dirs: + # correct path when compiling in Cygwin but with normal Win + # Python + if dir.startswith('/usr/lib'): + try: + dir = subprocess.check_output(['cygpath', '-w', dir]) + except (OSError, subprocess.CalledProcessError): + pass + else: + dir = filepath_from_subprocess_output(dir) + f_lib_dirs.append(dir) + c_library_dirs.extend(f_lib_dirs) + + # make g77-compiled static libs available to MSVC + for lib in fcompiler.libraries: + if not lib.startswith('msvc'): + c_libraries.append(lib) + p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') + if p: + dst_name = os.path.join(self.build_temp, lib + '.lib') + if not os.path.isfile(dst_name): + copy_file(p[0], dst_name) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + + def get_source_files(self): + self.check_extensions_list(self.extensions) + filenames = [] + for ext in self.extensions: + filenames.extend(get_ext_source_files(ext)) + return filenames + + def get_outputs(self): + self.check_extensions_list(self.extensions) + + outputs = [] + for ext in self.extensions: + if not ext.sources: + continue + fullname = self.get_ext_fullname(ext.name) + outputs.append(os.path.join(self.build_lib, + self.get_ext_filename(fullname))) + return outputs diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/build_py.py b/phivenv/Lib/site-packages/numpy/distutils/command/build_py.py new file mode 100644 index 0000000000000000000000000000000000000000..c4efde5042df4975a5b7888af9fb3ad9bd59ce5d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/build_py.py @@ -0,0 +1,31 @@ +from distutils.command.build_py import build_py as old_build_py +from numpy.distutils.misc_util import is_string + +class build_py(old_build_py): + + def run(self): + build_src = self.get_finalized_command('build_src') + if build_src.py_modules_dict and self.packages is None: + self.packages = list(build_src.py_modules_dict.keys ()) + old_build_py.run(self) + + def find_package_modules(self, package, package_dir): + modules = old_build_py.find_package_modules(self, package, package_dir) + + # Find build_src generated *.py files. + build_src = self.get_finalized_command('build_src') + modules += build_src.py_modules_dict.get(package, []) + + return modules + + def find_modules(self): + old_py_modules = self.py_modules[:] + new_py_modules = [_m for _m in self.py_modules if is_string(_m)] + self.py_modules[:] = new_py_modules + modules = old_build_py.find_modules(self) + self.py_modules[:] = old_py_modules + + return modules + + # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple + # and item[2] is source file. diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/build_scripts.py b/phivenv/Lib/site-packages/numpy/distutils/command/build_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..9ea703e4486c2f0f72a0f0b22fa263ac3d41a333 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/build_scripts.py @@ -0,0 +1,49 @@ +""" Modified version of build_scripts that handles building scripts from functions. 
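+
+ A scripts entry may be a callable that receives the build directory
+ and returns a script path, a list of paths, or None; see
+ generate_scripts() below.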
+ +""" +from distutils.command.build_scripts import build_scripts as old_build_scripts +from numpy.distutils import log +from numpy.distutils.misc_util import is_string + +class build_scripts(old_build_scripts): + + def generate_scripts(self, scripts): + new_scripts = [] + func_scripts = [] + for script in scripts: + if is_string(script): + new_scripts.append(script) + else: + func_scripts.append(script) + if not func_scripts: + return new_scripts + + build_dir = self.build_dir + self.mkpath(build_dir) + for func in func_scripts: + script = func(build_dir) + if not script: + continue + if is_string(script): + log.info(" adding '%s' to scripts" % (script,)) + new_scripts.append(script) + else: + [log.info(" adding '%s' to scripts" % (s,)) for s in script] + new_scripts.extend(list(script)) + return new_scripts + + def run (self): + if not self.scripts: + return + + self.scripts = self.generate_scripts(self.scripts) + # Now make sure that the distribution object has this list of scripts. + # setuptools' develop command requires that this be a list of filenames, + # not functions. + self.distribution.scripts = self.scripts + + return old_build_scripts.run(self) + + def get_source_files(self): + from numpy.distutils.misc_util import get_script_files + return get_script_files(self.scripts) diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/build_src.py b/phivenv/Lib/site-packages/numpy/distutils/command/build_src.py new file mode 100644 index 0000000000000000000000000000000000000000..be7574119f8857b8e1214f752deaa3139faaeeea --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/build_src.py @@ -0,0 +1,773 @@ +""" Build swig and f2py sources. +""" +import os +import re +import sys +import shlex +import copy + +from distutils.command import build_ext +from distutils.dep_util import newer_group, newer +from distutils.util import get_platform +from distutils.errors import DistutilsError, DistutilsSetupError + + +# this import can't be done here, as it uses numpy stuff only available +# after it's installed +#import numpy.f2py +from numpy.distutils import log +from numpy.distutils.misc_util import ( + fortran_ext_match, appendpath, is_string, is_sequence, get_cmd + ) +from numpy.distutils.from_template import process_file as process_f_file +from numpy.distutils.conv_template import process_file as process_c_file + +def subst_vars(target, source, d): + """Substitute any occurrence of @foo@ by d['foo'] from source file into + target.""" + var = re.compile('@([a-zA-Z_]+)@') + with open(source, 'r') as fs: + with open(target, 'w') as ft: + for l in fs: + m = var.search(l) + if m: + ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) + else: + ft.write(l) + +class build_src(build_ext.build_ext): + + description = "build sources from SWIG, F2PY files or a function" + + user_options = [ + ('build-src=', 'd', "directory to \"build\" sources to"), + ('f2py-opts=', None, "list of f2py command line options"), + ('swig=', None, "path to the SWIG executable"), + ('swig-opts=', None, "list of SWIG command line options"), + ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), + ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete + ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete + ('force', 'f', "forcibly build everything (ignore file timestamps)"), + ('inplace', 'i', + "ignore build-lib and put compiled extensions into the source " + + "directory alongside your pure Python 
modules"), + ('verbose-cfg', None, + "change logging level from WARN to INFO which will show all " + + "compiler output") + ] + + boolean_options = ['force', 'inplace', 'verbose-cfg'] + + help_options = [] + + def initialize_options(self): + self.extensions = None + self.package = None + self.py_modules = None + self.py_modules_dict = None + self.build_src = None + self.build_lib = None + self.build_base = None + self.force = None + self.inplace = None + self.package_dir = None + self.f2pyflags = None # obsolete + self.f2py_opts = None + self.swigflags = None # obsolete + self.swig_opts = None + self.swig_cpp = None + self.swig = None + self.verbose_cfg = None + + def finalize_options(self): + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib'), + ('force', 'force')) + if self.package is None: + self.package = self.distribution.ext_package + self.extensions = self.distribution.ext_modules + self.libraries = self.distribution.libraries or [] + self.py_modules = self.distribution.py_modules or [] + self.data_files = self.distribution.data_files or [] + + if self.build_src is None: + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) + self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) + + # py_modules_dict is used in build_py.find_package_modules + self.py_modules_dict = {} + + if self.f2pyflags: + if self.f2py_opts: + log.warn('ignoring --f2pyflags as --f2py-opts already used') + else: + self.f2py_opts = self.f2pyflags + self.f2pyflags = None + if self.f2py_opts is None: + self.f2py_opts = [] + else: + self.f2py_opts = shlex.split(self.f2py_opts) + + if self.swigflags: + if self.swig_opts: + log.warn('ignoring --swigflags as --swig-opts already used') + else: + self.swig_opts = self.swigflags + self.swigflags = None + + if self.swig_opts is None: + self.swig_opts = [] + else: + self.swig_opts = shlex.split(self.swig_opts) + + # use options from build_ext command + build_ext = self.get_finalized_command('build_ext') + if self.inplace is None: + self.inplace = build_ext.inplace + if self.swig_cpp is None: + self.swig_cpp = build_ext.swig_cpp + for c in ['swig', 'swig_opt']: + o = '--'+c.replace('_', '-') + v = getattr(build_ext, c, None) + if v: + if getattr(self, c): + log.warn('both build_src and build_ext define %s option' % (o)) + else: + log.info('using "%s=%s" option from build_ext command' % (o, v)) + setattr(self, c, v) + + def run(self): + log.info("build_src") + if not (self.extensions or self.libraries): + return + self.build_sources() + + def build_sources(self): + + if self.inplace: + self.get_package_dir = \ + self.get_finalized_command('build_py').get_package_dir + + self.build_py_modules_sources() + + for libname_info in self.libraries: + self.build_library_sources(*libname_info) + + if self.extensions: + self.check_extensions_list(self.extensions) + + for ext in self.extensions: + self.build_extension_sources(ext) + + self.build_data_files_sources() + self.build_npy_pkg_config() + + def build_data_files_sources(self): + if not self.data_files: + return + log.info('building data_files sources') + from numpy.distutils.misc_util import get_data_files + new_data_files = [] + for data in self.data_files: + if isinstance(data, str): + new_data_files.append(data) + elif isinstance(data, tuple): + d, files = data + if self.inplace: + build_dir = self.get_package_dir('.'.join(d.split(os.sep))) + else: + build_dir = os.path.join(self.build_src, d) + funcs = [f for f in files if hasattr(f, 
'__call__')] + files = [f for f in files if not hasattr(f, '__call__')] + for f in funcs: + if f.__code__.co_argcount==1: + s = f(build_dir) + else: + s = f() + if s is not None: + if isinstance(s, list): + files.extend(s) + elif isinstance(s, str): + files.append(s) + else: + raise TypeError(repr(s)) + filenames = get_data_files((d, files)) + new_data_files.append((d, filenames)) + else: + raise TypeError(repr(data)) + self.data_files[:] = new_data_files + + + def _build_npy_pkg_config(self, info, gd): + template, install_dir, subst_dict = info + template_dir = os.path.dirname(template) + for k, v in gd.items(): + subst_dict[k] = v + + if self.inplace == 1: + generated_dir = os.path.join(template_dir, install_dir) + else: + generated_dir = os.path.join(self.build_src, template_dir, + install_dir) + generated = os.path.basename(os.path.splitext(template)[0]) + generated_path = os.path.join(generated_dir, generated) + if not os.path.exists(generated_dir): + os.makedirs(generated_dir) + + subst_vars(generated_path, template, subst_dict) + + # Where to install relatively to install prefix + full_install_dir = os.path.join(template_dir, install_dir) + return full_install_dir, generated_path + + def build_npy_pkg_config(self): + log.info('build_src: building npy-pkg config files') + + # XXX: another ugly workaround to circumvent distutils brain damage. We + # need the install prefix here, but finalizing the options of the + # install command when only building sources cause error. Instead, we + # copy the install command instance, and finalize the copy so that it + # does not disrupt how distutils want to do things when with the + # original install command instance. + install_cmd = copy.copy(get_cmd('install')) + if not install_cmd.finalized == 1: + install_cmd.finalize_options() + build_npkg = False + if self.inplace == 1: + top_prefix = '.' 
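+ # for in-place builds the npy-pkg config .ini files are generated
+ # (and "installed") relative to the source tree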
+ build_npkg = True + elif hasattr(install_cmd, 'install_libbase'): + top_prefix = install_cmd.install_libbase + build_npkg = True + + if build_npkg: + for pkg, infos in self.distribution.installed_pkg_config.items(): + pkg_path = self.distribution.package_dir[pkg] + prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) + d = {'prefix': prefix} + for info in infos: + install_dir, generated = self._build_npy_pkg_config(info, d) + self.distribution.data_files.append((install_dir, + [generated])) + + def build_py_modules_sources(self): + if not self.py_modules: + return + log.info('building py_modules sources') + new_py_modules = [] + for source in self.py_modules: + if is_sequence(source) and len(source)==3: + package, module_base, source = source + if self.inplace: + build_dir = self.get_package_dir(package) + else: + build_dir = os.path.join(self.build_src, + os.path.join(*package.split('.'))) + if hasattr(source, '__call__'): + target = os.path.join(build_dir, module_base + '.py') + source = source(target) + if source is None: + continue + modules = [(package, module_base, source)] + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + self.py_modules_dict[package] += modules + else: + new_py_modules.append(source) + self.py_modules[:] = new_py_modules + + def build_library_sources(self, lib_name, build_info): + sources = list(build_info.get('sources', [])) + + if not sources: + return + + log.info('building library "%s" sources' % (lib_name)) + + sources = self.generate_sources(sources, (lib_name, build_info)) + + sources = self.template_sources(sources, (lib_name, build_info)) + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + self.package, h_files) + + #for f in h_files: + # self.distribution.headers.append((lib_name,f)) + + build_info['sources'] = sources + return + + def build_extension_sources(self, ext): + + sources = list(ext.sources) + + log.info('building extension "%s" sources' % (ext.name)) + + fullname = self.get_ext_fullname(ext.name) + + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + + if self.inplace: + self.ext_target_dir = self.get_package_dir(package) + + sources = self.generate_sources(sources, ext) + sources = self.template_sources(sources, ext) + sources = self.swig_sources(sources, ext) + sources = self.f2py_sources(sources, ext) + sources = self.pyrex_sources(sources, ext) + + sources, py_files = self.filter_py_files(sources) + + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + modules = [] + for f in py_files: + module = os.path.splitext(os.path.basename(f))[0] + modules.append((package, module, f)) + self.py_modules_dict[package] += modules + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + package, h_files) + #for f in h_files: + # self.distribution.headers.append((package,f)) + + ext.sources = sources + + def generate_sources(self, sources, extension): + new_sources = [] + func_sources = [] + for source in sources: + if is_string(source): + new_sources.append(source) + else: + func_sources.append(source) + if not func_sources: + return new_sources + if self.inplace and not is_sequence(extension): + build_dir = self.ext_target_dir + else: + if is_sequence(extension): + name = extension[0] + # if 'include_dirs' not in extension[1]: + # extension[1]['include_dirs'] = [] + # incl_dirs = extension[1]['include_dirs'] + else: + name = extension.name 
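+ # (sources generated for extension 'pkg.sub.ext' are placed under
+ # build_src/pkg/sub/, mirroring the dotted name)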
+ # incl_dirs = extension.include_dirs + #if self.build_src not in incl_dirs: + # incl_dirs.append(self.build_src) + build_dir = os.path.join(*([self.build_src] + +name.split('.')[:-1])) + self.mkpath(build_dir) + + if self.verbose_cfg: + new_level = log.INFO + else: + new_level = log.WARN + old_level = log.set_threshold(new_level) + + for func in func_sources: + source = func(extension, build_dir) + if not source: + continue + if is_sequence(source): + [log.info(" adding '%s' to sources." % (s,)) for s in source] + new_sources.extend(source) + else: + log.info(" adding '%s' to sources." % (source,)) + new_sources.append(source) + log.set_threshold(old_level) + return new_sources + + def filter_py_files(self, sources): + return self.filter_files(sources, ['.py']) + + def filter_h_files(self, sources): + return self.filter_files(sources, ['.h', '.hpp', '.inc']) + + def filter_files(self, sources, exts = []): + new_sources = [] + files = [] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext in exts: + files.append(source) + else: + new_sources.append(source) + return new_sources, files + + def template_sources(self, sources, extension): + new_sources = [] + if is_sequence(extension): + depends = extension[1].get('depends') + include_dirs = extension[1].get('include_dirs') + else: + depends = extension.depends + include_dirs = extension.include_dirs + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.src': # Template file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + self.mkpath(target_dir) + target_file = os.path.join(target_dir, os.path.basename(base)) + if (self.force or newer_group([source] + depends, target_file)): + if _f_pyf_ext_match(base): + log.info("from_template:> %s" % (target_file)) + outstr = process_f_file(source) + else: + log.info("conv_template:> %s" % (target_file)) + outstr = process_c_file(source) + with open(target_file, 'w') as fid: + fid.write(outstr) + if _header_ext_match(target_file): + d = os.path.dirname(target_file) + if d not in include_dirs: + log.info(" adding '%s' to include_dirs." % (d)) + include_dirs.append(d) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def pyrex_sources(self, sources, extension): + """Pyrex not supported; this remains for Cython support (see below)""" + new_sources = [] + ext_name = extension.name.split('.')[-1] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyx': + target_file = self.generate_a_pyrex_source(base, ext_name, + source, + extension) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def generate_a_pyrex_source(self, base, ext_name, source, extension): + """Pyrex is not supported, but some projects monkeypatch this method. + + That allows compiling Cython code, see gh-6955. + This method will remain here for compatibility reasons. 
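+
+ An overriding implementation is expected to generate a C/C++ file
+ from `source` and return its path; this stub returns an empty list,
+ effectively dropping `.pyx` files from plain numpy.distutils builds.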
+ """ + return [] + + def f2py_sources(self, sources, extension): + new_sources = [] + f2py_sources = [] + f_sources = [] + f2py_targets = {} + target_dirs = [] + ext_name = extension.name.split('.')[-1] + skip_f2py = 0 + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyf': # F2PY interface file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + if os.path.isfile(source): + name = get_f2py_modulename(source) + if name != ext_name: + raise DistutilsSetupError('mismatch of extension names: %s ' + 'provides %r but expected %r' % ( + source, name, ext_name)) + target_file = os.path.join(target_dir, name+'module.c') + else: + log.debug(' source %s does not exist: skipping f2py\'ing.' \ + % (source)) + name = ext_name + skip_f2py = 1 + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %smodule.c was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.info(' Yes! Using %r as up-to-date target.' \ + % (target_file)) + target_dirs.append(target_dir) + f2py_sources.append(source) + f2py_targets[source] = target_file + new_sources.append(target_file) + elif fortran_ext_match(ext): + f_sources.append(source) + else: + new_sources.append(source) + + if not (f2py_sources or f_sources): + return new_sources + + for d in target_dirs: + self.mkpath(d) + + f2py_options = extension.f2py_options + self.f2py_opts + + if self.distribution.libraries: + for name, build_info in self.distribution.libraries: + if name in extension.libraries: + f2py_options.extend(build_info.get('f2py_options', [])) + + log.info("f2py options: %s" % (f2py_options)) + + if f2py_sources: + if len(f2py_sources) != 1: + raise DistutilsSetupError( + 'only one .pyf file is allowed per extension module but got'\ + ' more: %r' % (f2py_sources,)) + source = f2py_sources[0] + target_file = f2py_targets[source] + target_dir = os.path.dirname(target_file) or '.' 
+ depends = [source] + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py: %s" % (source)) + from numpy.f2py import f2py2e + f2py2e.run_main(f2py_options + + ['--build-dir', target_dir, source]) + else: + log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) + else: + #XXX TODO: --inplace support for sdist command + if is_sequence(extension): + name = extension[0] + else: name = extension.name + target_dir = os.path.join(*([self.build_src] + +name.split('.')[:-1])) + target_file = os.path.join(target_dir, ext_name + 'module.c') + new_sources.append(target_file) + depends = f_sources + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py:> %s" % (target_file)) + self.mkpath(target_dir) + from numpy.f2py import f2py2e + f2py2e.run_main(f2py_options + ['--lower', + '--build-dir', target_dir]+\ + ['-m', ext_name]+f_sources) + else: + log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ + % (target_file)) + + if not os.path.isfile(target_file): + raise DistutilsError("f2py target file %r not generated" % (target_file,)) + + build_dir = os.path.join(self.build_src, target_dir) + target_c = os.path.join(build_dir, 'fortranobject.c') + target_h = os.path.join(build_dir, 'fortranobject.h') + log.info(" adding '%s' to sources." % (target_c)) + new_sources.append(target_c) + if build_dir not in extension.include_dirs: + log.info(" adding '%s' to include_dirs." % (build_dir)) + extension.include_dirs.append(build_dir) + + if not skip_f2py: + import numpy.f2py + d = os.path.dirname(numpy.f2py.__file__) + source_c = os.path.join(d, 'src', 'fortranobject.c') + source_h = os.path.join(d, 'src', 'fortranobject.h') + if newer(source_c, target_c) or newer(source_h, target_h): + self.mkpath(os.path.dirname(target_c)) + self.copy_file(source_c, target_c) + self.copy_file(source_h, target_h) + else: + if not os.path.isfile(target_c): + raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) + if not os.path.isfile(target_h): + raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) + + for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: + filename = os.path.join(target_dir, ext_name + name_ext) + if os.path.isfile(filename): + log.info(" adding '%s' to sources." % (filename)) + f_sources.append(filename) + + return new_sources + f_sources + + def swig_sources(self, sources, extension): + # Assuming SWIG 1.3.14 or later. 
See compatibility note in + # http://www.swig.org/Doc1.3/Python.html#Python_nn6 + + new_sources = [] + swig_sources = [] + swig_targets = {} + target_dirs = [] + py_files = [] # swig generated .py files + target_ext = '.c' + if '-c++' in extension.swig_opts: + typ = 'c++' + is_cpp = True + extension.swig_opts.remove('-c++') + elif self.swig_cpp: + typ = 'c++' + is_cpp = True + else: + typ = None + is_cpp = False + skip_swig = 0 + ext_name = extension.name.split('.')[-1] + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.i': # SWIG interface file + # the code below assumes that the sources list + # contains not more than one .i SWIG interface file + if self.inplace: + target_dir = os.path.dirname(base) + py_target_dir = self.ext_target_dir + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + py_target_dir = target_dir + if os.path.isfile(source): + name = get_swig_modulename(source) + if name != ext_name[1:]: + raise DistutilsSetupError( + 'mismatch of extension names: %s provides %r' + ' but expected %r' % (source, name, ext_name[1:])) + if typ is None: + typ = get_swig_target(source) + is_cpp = typ=='c++' + else: + typ2 = get_swig_target(source) + if typ2 is None: + log.warn('source %r does not define swig target, assuming %s swig target' \ + % (source, typ)) + elif typ!=typ2: + log.warn('expected %r but source %r defines %r swig target' \ + % (typ, source, typ2)) + if typ2=='c++': + log.warn('resetting swig target to c++ (some targets may have .c extension)') + is_cpp = True + else: + log.warn('assuming that %r has c++ swig target' % (source)) + if is_cpp: + target_ext = '.cpp' + target_file = os.path.join(target_dir, '%s_wrap%s' \ + % (name, target_ext)) + else: + log.warn(' source %s does not exist: skipping swig\'ing.' \ + % (source)) + name = ext_name[1:] + skip_swig = 1 + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %s_wrap.{c,cpp} was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.warn(' Yes! Using %r as up-to-date target.' 
\
+                              % (target_file))
+                target_dirs.append(target_dir)
+                new_sources.append(target_file)
+                py_files.append(os.path.join(py_target_dir, name+'.py'))
+                swig_sources.append(source)
+                swig_targets[source] = new_sources[-1]
+            else:
+                new_sources.append(source)
+
+        if not swig_sources:
+            return new_sources
+
+        if skip_swig:
+            return new_sources + py_files
+
+        for d in target_dirs:
+            self.mkpath(d)
+
+        swig = self.swig or self.find_swig()
+        swig_cmd = [swig, "-python"] + extension.swig_opts
+        if is_cpp:
+            swig_cmd.append('-c++')
+        for d in extension.include_dirs:
+            swig_cmd.append('-I'+d)
+        for source in swig_sources:
+            target = swig_targets[source]
+            depends = [source] + extension.depends
+            if self.force or newer_group(depends, target, 'newer'):
+                log.info("%s: %s" % (os.path.basename(swig) \
+                                     + (is_cpp and '++' or ''), source))
+                self.spawn(swig_cmd + self.swig_opts \
+                           + ["-o", target, '-outdir', py_target_dir, source])
+            else:
+                log.debug("  skipping '%s' swig interface (up-to-date)" \
+                          % (source))
+
+        return new_sources + py_files
+
+_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match
+
+#### SWIG related auxiliary functions ####
+_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
+                                     re.I).match
+_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search
+_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search
+
+def get_swig_target(source):
+    with open(source) as f:
+        result = None
+        line = f.readline()
+        if _has_cpp_header(line):
+            result = 'c++'
+        if _has_c_header(line):
+            result = 'c'
+    return result
+
+def get_swig_modulename(source):
+    with open(source) as f:
+        name = None
+        for line in f:
+            m = _swig_module_name_match(line)
+            if m:
+                name = m.group('name')
+                break
+    return name
+
+def _find_swig_target(target_dir, name):
+    for ext in ['.cpp', '.c']:
+        target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
+        if os.path.isfile(target):
+            break
+    return target
+
+#### F2PY related auxiliary functions ####
+
+_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
+                                     re.I).match
+_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
+                                          r'__user__[\w_]*)', re.I).match
+
+def get_f2py_modulename(source):
+    name = None
+    with open(source) as f:
+        for line in f:
+            m = _f2py_module_name_match(line)
+            if m:
+                if _f2py_user_module_name_match(line): # skip *__user__* names
+                    continue
+                name = m.group('name')
+                break
+    return name
+
+##########################################
diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/config.py b/phivenv/Lib/site-packages/numpy/distutils/command/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..824991974981c74f60fd8f51617af3851092d1c3
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/distutils/command/config.py
@@ -0,0 +1,516 @@
+# Added Fortran compiler support to config. Currently useful only for
+# try_compile call. try_run works but is untested for most of Fortran
+# compilers (they must define linker_exe first).
+# Pearu Peterson +import os +import signal +import subprocess +import sys +import textwrap +import warnings + +from distutils.command.config import config as old_config +from distutils.command.config import LANG_EXT +from distutils import log +from distutils.file_util import copy_file +from distutils.ccompiler import CompileError, LinkError +import distutils +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.mingw32ccompiler import generate_manifest +from numpy.distutils.command.autodist import (check_gcc_function_attribute, + check_gcc_function_attribute_with_intrinsics, + check_gcc_variable_attribute, + check_gcc_version_at_least, + check_inline, + check_restrict, + check_compiler_gcc) + +LANG_EXT['f77'] = '.f' +LANG_EXT['f90'] = '.f90' + +class config(old_config): + old_config.user_options += [ + ('fcompiler=', None, "specify the Fortran compiler type"), + ] + + def initialize_options(self): + self.fcompiler = None + old_config.initialize_options(self) + + def _check_compiler (self): + old_config._check_compiler(self) + from numpy.distutils.fcompiler import FCompiler, new_fcompiler + + if sys.platform == 'win32' and (self.compiler.compiler_type in + ('msvc', 'intelw', 'intelemw')): + # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: + # initialize call query_vcvarsall, which throws an OSError, and + # causes an error along the way without much information. We try to + # catch it here, hoping it is early enough, and print a helpful + # message instead of Error: None. + if not self.compiler.initialized: + try: + self.compiler.initialize() + except OSError as e: + msg = textwrap.dedent("""\ + Could not initialize compiler instance: do you have Visual Studio + installed? If you are trying to build with MinGW, please use "python setup.py + build -c mingw32" instead. If you have Visual Studio installed, check it is + correctly installed, and the right version (VS 2015 as of this writing). + + Original exception was: %s, and the Compiler class was %s + ============================================================================""") \ + % (e, self.compiler.__class__.__name__) + print(textwrap.dedent("""\ + ============================================================================""")) + raise distutils.errors.DistutilsPlatformError(msg) from e + + # After MSVC is initialized, add an explicit /MANIFEST to linker + # flags. See issues gh-4245 and gh-4101 for details. Also + # relevant are issues 4431 and 16296 on the Python bug tracker. 
+ from distutils import msvc9compiler + if msvc9compiler.get_build_version() >= 10: + for ldflags in [self.compiler.ldflags_shared, + self.compiler.ldflags_shared_debug]: + if '/MANIFEST' not in ldflags: + ldflags.append('/MANIFEST') + + if not isinstance(self.fcompiler, FCompiler): + self.fcompiler = new_fcompiler(compiler=self.fcompiler, + dry_run=self.dry_run, force=1, + c_compiler=self.compiler) + if self.fcompiler is not None: + self.fcompiler.customize(self.distribution) + if self.fcompiler.get_version(): + self.fcompiler.customize_cmd(self) + self.fcompiler.show_customization() + + def _wrap_method(self, mth, lang, args): + from distutils.ccompiler import CompileError + from distutils.errors import DistutilsExecError + save_compiler = self.compiler + if lang in ['f77', 'f90']: + self.compiler = self.fcompiler + if self.compiler is None: + raise CompileError('%s compiler is not set' % (lang,)) + try: + ret = mth(*((self,)+args)) + except (DistutilsExecError, CompileError) as e: + self.compiler = save_compiler + raise CompileError from e + self.compiler = save_compiler + return ret + + def _compile (self, body, headers, include_dirs, lang): + src, obj = self._wrap_method(old_config._compile, lang, + (body, headers, include_dirs, lang)) + # _compile in unixcompiler.py sometimes creates .d dependency files. + # Clean them up. + self.temp_files.append(obj + '.d') + return src, obj + + def _link (self, body, + headers, include_dirs, + libraries, library_dirs, lang): + if self.compiler.compiler_type=='msvc': + libraries = (libraries or [])[:] + library_dirs = (library_dirs or [])[:] + if lang in ['f77', 'f90']: + lang = 'c' # always use system linker when using MSVC compiler + if self.fcompiler: + for d in self.fcompiler.library_dirs or []: + # correct path when compiling in Cygwin but with + # normal Win Python + if d.startswith('/usr/lib'): + try: + d = subprocess.check_output(['cygpath', + '-w', d]) + except (OSError, subprocess.CalledProcessError): + pass + else: + d = filepath_from_subprocess_output(d) + library_dirs.append(d) + for libname in self.fcompiler.libraries or []: + if libname not in libraries: + libraries.append(libname) + for libname in libraries: + if libname.startswith('msvc'): continue + fileexists = False + for libdir in library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(libdir, '%s.lib' % (libname)) + copy_file(libfile, libfile2) + self.temp_files.append(libfile2) + fileexists = True + break + if fileexists: continue + log.warn('could not find library %r in directories %s' \ + % (libname, library_dirs)) + elif self.compiler.compiler_type == 'mingw32': + generate_manifest(self) + return self._wrap_method(old_config._link, lang, + (body, headers, include_dirs, + libraries, library_dirs, lang)) + + def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): + self._check_compiler() + return self.try_compile( + "/* we need a dummy line to make distutils happy */", + [header], include_dirs) + + def check_decl(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = textwrap.dedent(""" + int main(void) + { + #ifndef %s + (void) %s; + 
#endif + ; + return 0; + }""") % (symbol, symbol) + + return self.try_compile(body, headers, include_dirs) + + def check_macro_true(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = textwrap.dedent(""" + int main(void) + { + #if %s + #else + #error false or undefined macro + #endif + ; + return 0; + }""") % (symbol,) + + return self.try_compile(body, headers, include_dirs) + + def check_type(self, type_name, headers=None, include_dirs=None, + library_dirs=None): + """Check type availability. Return True if the type can be compiled, + False otherwise""" + self._check_compiler() + + # First check the type can be compiled + body = textwrap.dedent(r""" + int main(void) { + if ((%(name)s *) 0) + return 0; + if (sizeof (%(name)s)) + return 0; + } + """) % {'name': type_name} + + st = False + try: + try: + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + st = True + except distutils.errors.CompileError: + st = False + finally: + self._clean() + + return st + + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): + """Check size of a given type.""" + self._check_compiler() + + # First check the type can be compiled + body = textwrap.dedent(r""" + typedef %(type)s npy_check_sizeof_type; + int main (void) + { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; + test_array [0] = 0 + + ; + return 0; + } + """) + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + self._clean() + + if expected: + body = textwrap.dedent(r""" + typedef %(type)s npy_check_sizeof_type; + int main (void) + { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; + test_array [0] = 0 + + ; + return 0; + } + """) + for size in expected: + try: + self._compile(body % {'type': type_name, 'size': size}, + headers, include_dirs, 'c') + self._clean() + return size + except CompileError: + pass + + # this fails to *compile* if size > sizeof(type) + body = textwrap.dedent(r""" + typedef %(type)s npy_check_sizeof_type; + int main (void) + { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; + test_array [0] = 0 + + ; + return 0; + } + """) + + # The principle is simple: we first find low and high bounds of size + # for the type, where low/high are looked up on a log scale. Then, we + # do a binary search to find the exact size between low and high + low = 0 + mid = 0 + while True: + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + break + except CompileError: + #log.info("failure to test for bound %d" % mid) + low = mid + 1 + mid = 2 * mid + 1 + + high = mid + # Binary search: + while low != high: + mid = (high - low) // 2 + low + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + high = mid + except CompileError: + low = mid + 1 + return low + + def check_func(self, func, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + # clean up distutils's config a bit: add void to main(), and + # return a value. + self._check_compiler() + body = [] + if decl: + if type(decl) == str: + body.append(decl) + else: + body.append("int %s (void);" % func) + # Handle MSVC intrinsics: force MS compiler to make a function call. 
+ # Useful to test for some functions when built with optimization on, to + # avoid build error because the intrinsic and our 'fake' test + # declaration do not match. + body.append("#ifdef _MSC_VER") + body.append("#pragma function(%s)" % func) + body.append("#endif") + body.append("int main (void) {") + if call: + if call_args is None: + call_args = '' + body.append(" %s(%s);" % (func, call_args)) + else: + body.append(" %s;" % func) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_funcs_once(self, funcs, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + """Check a list of functions at once. + + This is useful to speed up things, since all the functions in the funcs + list will be put in one compilation unit. + + Arguments + --------- + funcs : seq + list of functions to test + include_dirs : seq + list of header paths + libraries : seq + list of libraries to link the code snippet to + library_dirs : seq + list of library paths + decl : dict + for every (key, value), the declaration in the value will be + used for function in key. If a function is not in the + dictionary, no declaration will be used. + call : dict + for every item (f, value), if the value is True, a call will be + done to the function f. + """ + self._check_compiler() + body = [] + if decl: + for f, v in decl.items(): + if v: + body.append("int %s (void);" % f) + + # Handle MS intrinsics. See check_func for more info. + body.append("#ifdef _MSC_VER") + for func in funcs: + body.append("#pragma function(%s)" % func) + body.append("#endif") + + body.append("int main (void) {") + if call: + for f in funcs: + if f in call and call[f]: + if not (call_args and f in call_args and call_args[f]): + args = '' + else: + args = call_args[f] + body.append(" %s(%s);" % (f, args)) + else: + body.append(" %s;" % f) + else: + for f in funcs: + body.append(" %s;" % f) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_inline(self): + """Return the inline keyword recognized by the compiler, empty string + otherwise.""" + return check_inline(self) + + def check_restrict(self): + """Return the restrict keyword recognized by the compiler, empty string + otherwise.""" + return check_restrict(self) + + def check_compiler_gcc(self): + """Return True if the C compiler is gcc""" + return check_compiler_gcc(self) + + def check_gcc_function_attribute(self, attribute, name): + return check_gcc_function_attribute(self, attribute, name) + + def check_gcc_function_attribute_with_intrinsics(self, attribute, name, + code, include): + return check_gcc_function_attribute_with_intrinsics(self, attribute, + name, code, include) + + def check_gcc_variable_attribute(self, attribute): + return check_gcc_variable_attribute(self, attribute) + + def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): + """Return True if the GCC version is greater than or equal to the + specified version.""" + return check_gcc_version_at_least(self, major, minor, patchlevel) + + def get_output(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, + lang="c", use_tee=None): + """Try to compile, link to an executable, and run a program + built from 'body' and 'headers'. Returns the exit status code + of the program and its output. 
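+
+        Usage sketch (the body argument here is illustrative; note the
+        deprecation warning emitted below)::
+
+            exitcode, output = self.get_output("int main(void) { return 0; }")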
+ """ + # 2008-11-16, RemoveMe + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" + "Usage of get_output is deprecated: please do not \n" + "use it anymore, and avoid configuration checks \n" + "involving running executable on the target machine.\n" + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning, stacklevel=2) + self._check_compiler() + exitcode, output = 255, '' + try: + grabber = GrabStdout() + try: + src, obj, exe = self._link(body, headers, include_dirs, + libraries, library_dirs, lang) + grabber.restore() + except Exception: + output = grabber.data + grabber.restore() + raise + exe = os.path.join('.', exe) + try: + # specify cwd arg for consistency with + # historic usage pattern of exec_command() + # also, note that exe appears to be a string, + # which exec_command() handled, but we now + # use a list for check_output() -- this assumes + # that exe is always a single command + output = subprocess.check_output([exe], cwd='.') + except subprocess.CalledProcessError as exc: + exitstatus = exc.returncode + output = '' + except OSError: + # preserve the EnvironmentError exit status + # used historically in exec_command() + exitstatus = 127 + output = '' + else: + output = filepath_from_subprocess_output(output) + if hasattr(os, 'WEXITSTATUS'): + exitcode = os.WEXITSTATUS(exitstatus) + if os.WIFSIGNALED(exitstatus): + sig = os.WTERMSIG(exitstatus) + log.error('subprocess exited with signal %d' % (sig,)) + if sig == signal.SIGINT: + # control-C + raise KeyboardInterrupt + else: + exitcode = exitstatus + log.info("success!") + except (CompileError, LinkError): + log.info("failure.") + self._clean() + return exitcode, output + +class GrabStdout: + + def __init__(self): + self.sys_stdout = sys.stdout + self.data = '' + sys.stdout = self + + def write (self, data): + self.sys_stdout.write(data) + self.data += data + + def flush (self): + self.sys_stdout.flush() + + def restore(self): + sys.stdout = self.sys_stdout diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/config_compiler.py b/phivenv/Lib/site-packages/numpy/distutils/command/config_compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..83a8a39b1d082faae48ce6c605bda49dd183e363 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/config_compiler.py @@ -0,0 +1,126 @@ +from distutils.core import Command +from numpy.distutils import log + +#XXX: Linker flags + +def show_fortran_compilers(_cache=None): + # Using cache to prevent infinite recursion. + if _cache: + return + elif _cache is None: + _cache = [] + _cache.append(1) + from numpy.distutils.fcompiler import show_fcompilers + import distutils.core + dist = distutils.core._setup_distribution + show_fcompilers(dist) + +class config_fc(Command): + """ Distutils command to hold user specified options + to Fortran compilers. + + config_fc command is used by the FCompiler.customize() method. 
+ """ + + description = "specify Fortran 77/Fortran 90 compiler information" + + user_options = [ + ('fcompiler=', None, "specify Fortran compiler type"), + ('f77exec=', None, "specify F77 compiler command"), + ('f90exec=', None, "specify F90 compiler command"), + ('f77flags=', None, "specify F77 compiler flags"), + ('f90flags=', None, "specify F90 compiler flags"), + ('opt=', None, "specify optimization flags"), + ('arch=', None, "specify architecture specific optimization flags"), + ('debug', 'g', "compile with debugging information"), + ('noopt', None, "compile without optimization"), + ('noarch', None, "compile without arch-dependent optimization"), + ] + + help_options = [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + boolean_options = ['debug', 'noopt', 'noarch'] + + def initialize_options(self): + self.fcompiler = None + self.f77exec = None + self.f90exec = None + self.f77flags = None + self.f90flags = None + self.opt = None + self.arch = None + self.debug = None + self.noopt = None + self.noarch = None + + def finalize_options(self): + log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') + build_clib = self.get_finalized_command('build_clib') + build_ext = self.get_finalized_command('build_ext') + config = self.get_finalized_command('config') + build = self.get_finalized_command('build') + cmd_list = [self, config, build_clib, build_ext, build] + for a in ['fcompiler']: + l = [] + for c in cmd_list: + v = getattr(c, a) + if v is not None: + if not isinstance(v, str): v = v.compiler_type + if v not in l: l.append(v) + if not l: v1 = None + else: v1 = l[0] + if len(l)>1: + log.warn(' commands have different --%s options: %s'\ + ', using first in list as default' % (a, l)) + if v1: + for c in cmd_list: + if getattr(c, a) is None: setattr(c, a, v1) + + def run(self): + # Do nothing. + return + +class config_cc(Command): + """ Distutils command to hold user specified options + to C/C++ compilers. + """ + + description = "specify C/C++ compiler information" + + user_options = [ + ('compiler=', None, "specify C/C++ compiler type"), + ] + + def initialize_options(self): + self.compiler = None + + def finalize_options(self): + log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') + build_clib = self.get_finalized_command('build_clib') + build_ext = self.get_finalized_command('build_ext') + config = self.get_finalized_command('config') + build = self.get_finalized_command('build') + cmd_list = [self, config, build_clib, build_ext, build] + for a in ['compiler']: + l = [] + for c in cmd_list: + v = getattr(c, a) + if v is not None: + if not isinstance(v, str): v = v.compiler_type + if v not in l: l.append(v) + if not l: v1 = None + else: v1 = l[0] + if len(l)>1: + log.warn(' commands have different --%s options: %s'\ + ', using first in list as default' % (a, l)) + if v1: + for c in cmd_list: + if getattr(c, a) is None: setattr(c, a, v1) + return + + def run(self): + # Do nothing. 
+ return diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/develop.py b/phivenv/Lib/site-packages/numpy/distutils/command/develop.py new file mode 100644 index 0000000000000000000000000000000000000000..5cef66ce42387601d9364051f63ad92308a7e1c3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/develop.py @@ -0,0 +1,15 @@ +""" Override the develop command from setuptools so we can ensure that our +generated files (from build_src or build_scripts) are properly converted to real +files with filenames. + +""" +from setuptools.command.develop import develop as old_develop + +class develop(old_develop): + __doc__ = old_develop.__doc__ + def install_for_development(self): + # Build sources in-place, too. + self.reinitialize_command('build_src', inplace=1) + # Make sure scripts are built. + self.run_command('build_scripts') + old_develop.install_for_development(self) diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/egg_info.py b/phivenv/Lib/site-packages/numpy/distutils/command/egg_info.py new file mode 100644 index 0000000000000000000000000000000000000000..c59836d23161ba0653e8350d979e2365f629a38e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/egg_info.py @@ -0,0 +1,25 @@ +import sys + +from setuptools.command.egg_info import egg_info as _egg_info + +class egg_info(_egg_info): + def run(self): + if 'sdist' in sys.argv: + import warnings + import textwrap + msg = textwrap.dedent(""" + `build_src` is being run, this may lead to missing + files in your sdist! You want to use distutils.sdist + instead of the setuptools version: + + from distutils.command.sdist import sdist + cmdclass={'sdist': sdist}" + + See numpy's setup.py or gh-7131 for details.""") + warnings.warn(msg, UserWarning, stacklevel=2) + + # We need to ensure that build_src has been executed in order to give + # setuptools' egg_info command real filenames instead of functions which + # generate files. + self.run_command("build_src") + _egg_info.run(self) diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/install.py b/phivenv/Lib/site-packages/numpy/distutils/command/install.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b47b1c32603678b38668bcd10285021837f394 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/install.py @@ -0,0 +1,79 @@ +import sys +if 'setuptools' in sys.modules: + import setuptools.command.install as old_install_mod + have_setuptools = True +else: + import distutils.command.install as old_install_mod + have_setuptools = False +from distutils.file_util import write_file + +old_install = old_install_mod.install + +class install(old_install): + + # Always run install_clib - the command is cheap, so no need to bypass it; + # but it's not run by setuptools -- so it's run again in install_data + sub_commands = old_install.sub_commands + [ + ('install_clib', lambda x: True) + ] + + def finalize_options (self): + old_install.finalize_options(self) + self.install_lib = self.install_libbase + + def setuptools_run(self): + """ The setuptools version of the .run() method. + + We must pull in the entire code so we can override the level used in the + _getframe() call since we wrap this call by one more level. + """ + from distutils.command.install import install as distutils_install + + # Explicit request for old-style install? 
Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return distutils_install.run(self) + + # Attempt to detect whether we were called from setup() or by another + # command. If we were called by setup(), our caller will be the + # 'run_command' method in 'distutils.dist', and *its* caller will be + # the 'run_commands' method. If we were called any other way, our + # immediate caller *might* be 'run_command', but it won't have been + # called by 'run_commands'. This is slightly kludgy, but seems to + # work. + # + caller = sys._getframe(3) + caller_module = caller.f_globals.get('__name__', '') + caller_name = caller.f_code.co_name + + if caller_module != 'distutils.dist' or caller_name!='run_commands': + # We weren't called from the command line or setup(), so we + # should run in backward-compatibility mode to support bdist_* + # commands. + distutils_install.run(self) + else: + self.do_egg_install() + + def run(self): + if not have_setuptools: + r = old_install.run(self) + else: + r = self.setuptools_run() + if self.record: + # bdist_rpm fails when INSTALLED_FILES contains + # paths with spaces. Such paths must be enclosed + # with double-quotes. + with open(self.record) as f: + lines = [] + need_rewrite = False + for l in f: + l = l.rstrip() + if ' ' in l: + need_rewrite = True + l = '"%s"' % (l) + lines.append(l) + if need_rewrite: + self.execute(write_file, + (self.record, lines), + "re-writing list of installed files to '%s'" % + self.record) + return r diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/install_clib.py b/phivenv/Lib/site-packages/numpy/distutils/command/install_clib.py new file mode 100644 index 0000000000000000000000000000000000000000..1a0569ae5d60c9129ec4309291056923f496700f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/install_clib.py @@ -0,0 +1,40 @@ +import os +from distutils.core import Command +from distutils.ccompiler import new_compiler +from numpy.distutils.misc_util import get_cmd + +class install_clib(Command): + description = "Command to install installable C libraries" + + user_options = [] + + def initialize_options(self): + self.install_dir = None + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', ('install_lib', 'install_dir')) + + def run (self): + build_clib_cmd = get_cmd("build_clib") + if not build_clib_cmd.build_clib: + # can happen if the user specified `--skip-build` + build_clib_cmd.finalize_options() + build_dir = build_clib_cmd.build_clib + + # We need the compiler to get the library name -> filename association + if not build_clib_cmd.compiler: + compiler = new_compiler(compiler=None) + compiler.customize(self.distribution) + else: + compiler = build_clib_cmd.compiler + + for l in self.distribution.installed_libraries: + target_dir = os.path.join(self.install_dir, l.target_dir) + name = compiler.library_filename(l.name) + source = os.path.join(build_dir, name) + self.mkpath(target_dir) + self.outfiles.append(self.copy_file(source, target_dir)[0]) + + def get_outputs(self): + return self.outfiles diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/install_data.py b/phivenv/Lib/site-packages/numpy/distutils/command/install_data.py new file mode 100644 index 0000000000000000000000000000000000000000..0bc0ce422bad47d5b69963b9906b68d7e971ac33 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/install_data.py @@ -0,0 +1,24 @@ +import sys +have_setuptools = ('setuptools' in sys.modules) + +from 
distutils.command.install_data import install_data as old_install_data + +#data installer with improved intelligence over distutils +#data files are copied into the project directory instead +#of willy-nilly +class install_data (old_install_data): + + def run(self): + old_install_data.run(self) + + if have_setuptools: + # Run install_clib again, since setuptools does not run sub-commands + # of install automatically + self.run_command('install_clib') + + def finalize_options (self): + self.set_undefined_options('install', + ('install_lib', 'install_dir'), + ('root', 'root'), + ('force', 'force'), + ) diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/install_headers.py b/phivenv/Lib/site-packages/numpy/distutils/command/install_headers.py new file mode 100644 index 0000000000000000000000000000000000000000..c4fcbbd25617c79e9de324e4c4b94e1a5595d2a5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/install_headers.py @@ -0,0 +1,25 @@ +import os +from distutils.command.install_headers import install_headers as old_install_headers + +class install_headers (old_install_headers): + + def run (self): + headers = self.distribution.headers + if not headers: + return + + prefix = os.path.dirname(self.install_dir) + for header in headers: + if isinstance(header, tuple): + # Kind of a hack, but I don't know where else to change this... + if header[0] == 'numpy._core': + header = ('numpy', header[1]) + if os.path.splitext(header[1])[1] == '.inc': + continue + d = os.path.join(*([prefix]+header[0].split('.'))) + header = header[1] + else: + d = self.install_dir + self.mkpath(d) + (out, _) = self.copy_file(header, d) + self.outfiles.append(out) diff --git a/phivenv/Lib/site-packages/numpy/distutils/command/sdist.py b/phivenv/Lib/site-packages/numpy/distutils/command/sdist.py new file mode 100644 index 0000000000000000000000000000000000000000..ed0e75cacf5e667499a1d3dee6eac4223c579c46 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/command/sdist.py @@ -0,0 +1,27 @@ +import sys +if 'setuptools' in sys.modules: + from setuptools.command.sdist import sdist as old_sdist +else: + from distutils.command.sdist import sdist as old_sdist + +from numpy.distutils.misc_util import get_data_files + +class sdist(old_sdist): + + def add_defaults (self): + old_sdist.add_defaults(self) + + dist = self.distribution + + if dist.has_data_files(): + for data in dist.data_files: + self.filelist.extend(get_data_files(data)) + + if dist.has_headers(): + headers = [] + for h in dist.headers: + if isinstance(h, str): headers.append(h) + else: headers.append(h[1]) + self.filelist.extend(headers) + + return diff --git a/phivenv/Lib/site-packages/numpy/distutils/conv_template.py b/phivenv/Lib/site-packages/numpy/distutils/conv_template.py new file mode 100644 index 0000000000000000000000000000000000000000..223c4a2751e558e6a9b9507f5bc54963de53dbb5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/conv_template.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +""" +takes templated file .xxx.src and produces .xxx file where .xxx is +.i or .c or .h, using the following template rules + +/**begin repeat -- on a line by itself marks the start of a repeated code + segment +/**end repeat**/ -- on a line by itself marks it's end + +After the /**begin repeat and before the */, all the named templates are placed +these should all have the same number of replacements + +Repeat blocks can be nested, with each nested block labeled with its depth, +i.e. +/**begin repeat1 + *.... 
+ */ +/**end repeat1**/ + +When using nested loops, you can optionally exclude particular +combinations of the variables using (inside the comment portion of the inner loop): + + :exclude: var1=value1, var2=value2, ... + +This will exclude the pattern where var1 is value1 and var2 is value2 when +the result is being generated. + + +In the main body each replace will use one entry from the list of named replacements + + Note that all #..# forms in a block must have the same number of + comma-separated entries. + +Example: + + An input file containing + + /**begin repeat + * #a = 1,2,3# + * #b = 1,2,3# + */ + + /**begin repeat1 + * #c = ted, jim# + */ + @a@, @b@, @c@ + /**end repeat1**/ + + /**end repeat**/ + + produces + + line 1 "template.c.src" + + /* + ********************************************************************* + ** This file was autogenerated from a template DO NOT EDIT!!** + ** Changes should be made to the original source (.src) file ** + ********************************************************************* + */ + + #line 9 + 1, 1, ted + + #line 9 + 1, 1, jim + + #line 9 + 2, 2, ted + + #line 9 + 2, 2, jim + + #line 9 + 3, 3, ted + + #line 9 + 3, 3, jim + +""" + +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +# names for replacement that are already global. +global_names = {} + +# header placed at the front of head processed file +header =\ +""" +/* + ***************************************************************************** + ** This file was autogenerated from a template DO NOT EDIT!!!! ** + ** Changes should be made to the original source (.src) file ** + ***************************************************************************** + */ + +""" +# Parse string for repeat loops +def parse_structure(astr, level): + """ + The returned line number is from the beginning of the string, starting + at zero. Returns an empty list if no loops found. + + """ + if level == 0 : + loopbeg = "/**begin repeat" + loopend = "/**end repeat**/" + else : + loopbeg = "/**begin repeat%d" % level + loopend = "/**end repeat%d**/" % level + + ind = 0 + line = 0 + spanlist = [] + while True: + start = astr.find(loopbeg, ind) + if start == -1: + break + start2 = astr.find("*/", start) + start2 = astr.find("\n", start2) + fini1 = astr.find(loopend, start2) + fini2 = astr.find("\n", fini1) + line += astr.count("\n", ind, start2+1) + spanlist.append((start, start2+1, fini1, fini2+1, line)) + line += astr.count("\n", start2+1, fini2) + ind = fini2 + spanlist.sort() + return spanlist + + +def paren_repl(obj): + torep = obj.group(1) + numrep = obj.group(2) + return ','.join([torep]*int(numrep)) + +parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") +plainrep = re.compile(r"([^*]+)\*(\d+)") +def parse_values(astr): + # replaces all occurrences of '(a,b,c)*4' in astr + # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate + # empty values, i.e., ()*4 yields ',,,'. The result is + # split at ',' and a list of values returned. 
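+    # Worked example: parse_values("(1,2)*2, 3*3") first expands the
+    # parenthesized group to "1,2,1,2, 3*3" and finally returns
+    # ['1', '2', '1', '2', '3', '3', '3'].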
+    astr = parenrep.sub(paren_repl, astr)
+    # replaces occurrences of xxx*3 with xxx, xxx, xxx
+    astr = ','.join([plainrep.sub(paren_repl, x.strip())
+                     for x in astr.split(',')])
+    return astr.split(',')
+
+
+stripast = re.compile(r"\n\s*\*?")
+named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
+exclude_vars_re = re.compile(r"(\w*)=(\w*)")
+exclude_re = re.compile(":exclude:")
+def parse_loop_header(loophead) :
+    """Find all named replacements in the header
+
+    Returns a list of dictionaries, one for each loop iteration,
+    where each key is a name to be substituted and the corresponding
+    value is the replacement string.
+
+    Also return a list of exclusions. The exclusions are dictionaries
+    of key value pairs. There can be more than one exclusion.
+    [{'var1':'value1', 'var2', 'value2'[,...]}, ...]
+
+    """
+    # Strip out '\n' and leading '*', if any, in continuation lines.
+    # This should not effect code previous to this change as
+    # continuation lines were not allowed.
+    loophead = stripast.sub("", loophead)
+    # parse out the names and lists of values
+    names = []
+    reps = named_re.findall(loophead)
+    nsub = None
+    for rep in reps:
+        name = rep[0]
+        vals = parse_values(rep[1])
+        size = len(vals)
+        if nsub is None :
+            nsub = size
+        elif nsub != size :
+            msg = "Mismatch in number of values, %d != %d\n%s = %s"
+            raise ValueError(msg % (nsub, size, name, vals))
+        names.append((name, vals))
+
+
+    # Find any exclude variables
+    excludes = []
+
+    for obj in exclude_re.finditer(loophead):
+        span = obj.span()
+        # find next newline
+        endline = loophead.find('\n', span[1])
+        substr = loophead[span[1]:endline]
+        ex_names = exclude_vars_re.findall(substr)
+        excludes.append(dict(ex_names))
+
+    # generate list of dictionaries, one for each template iteration
+    dlist = []
+    if nsub is None :
+        raise ValueError("No substitution variables found")
+    for i in range(nsub):
+        tmp = {name: vals[i] for name, vals in names}
+        dlist.append(tmp)
+    return dlist
+
+replace_re = re.compile(r"@(\w+)@")
+def parse_string(astr, env, level, line) :
+    lineno = "#line %d\n" % line
+
+    # local function for string replacement, uses env
+    def replace(match):
+        name = match.group(1)
+        try :
+            val = env[name]
+        except KeyError:
+            msg = 'line %d: no definition of key "%s"'%(line, name)
+            raise ValueError(msg) from None
+        return val
+
+    code = [lineno]
+    struct = parse_structure(astr, level)
+    if struct :
+        # recurse over inner loops
+        oldend = 0
+        newlevel = level + 1
+        for sub in struct:
+            pref = astr[oldend:sub[0]]
+            head = astr[sub[0]:sub[1]]
+            text = astr[sub[1]:sub[2]]
+            oldend = sub[3]
+            newline = line + sub[4]
+            code.append(replace_re.sub(replace, pref))
+            try :
+                envlist = parse_loop_header(head)
+            except ValueError as e:
+                msg = "line %d: %s" % (newline, e)
+                raise ValueError(msg)
+            for newenv in envlist :
+                newenv.update(env)
+                newcode = parse_string(text, newenv, newlevel, newline)
+                code.extend(newcode)
+        suff = astr[oldend:]
+        code.append(replace_re.sub(replace, suff))
+    else :
+        # replace keys
+        code.append(replace_re.sub(replace, astr))
+    code.append('\n')
+    return ''.join(code)
+
+def process_str(astr):
+    code = [header]
+    code.extend(parse_string(astr, global_names, 0, 1))
+    return ''.join(code)
+
+
+include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
+                            r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn =
os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + sourcefile = os.path.normcase(source).replace("\\", "\\\\") + try: + code = process_str(''.join(lines)) + except ValueError as e: + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None + return '#line 1 "%s"\n%s' % (sourcefile, code) + + +def unique_key(adict): + # this obtains a unique key given a dictionary + # currently it works by appending together n of the letters of the + # current keys and increasing n until a unique key is found + # -- not particularly quick + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = "".join([x[:n] for x in allkeys]) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +def main(): + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + try: + writestr = process_str(allstr) + except ValueError as e: + raise ValueError("In %s loop at %s" % (file, e)) from None + + outfile.write(writestr) + +if __name__ == "__main__": + main() diff --git a/phivenv/Lib/site-packages/numpy/distutils/core.py b/phivenv/Lib/site-packages/numpy/distutils/core.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6ba885edd86d494af88275394736a7cd9f8858 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/core.py @@ -0,0 +1,215 @@ +import sys +from distutils.core import Distribution + +if 'setuptools' in sys.modules: + have_setuptools = True + from setuptools import setup as old_setup + # easy_install imports math, it may be picked up from cwd + from setuptools.command import easy_install + try: + # very old versions of setuptools don't have this + from setuptools.command import bdist_egg + except ImportError: + have_setuptools = False +else: + from distutils.core import setup as old_setup + have_setuptools = False + +import warnings +import distutils.core +import distutils.dist + +from numpy.distutils.extension import Extension # noqa: F401 +from numpy.distutils.numpy_distribution import NumpyDistribution +from numpy.distutils.command import config, config_compiler, \ + build, build_py, build_ext, build_clib, build_src, build_scripts, \ + sdist, install_data, install_headers, install, bdist_rpm, \ + install_clib +from numpy.distutils.misc_util import is_sequence, is_string + +numpy_cmdclass = {'build': build.build, + 'build_src': build_src.build_src, + 'build_scripts': build_scripts.build_scripts, + 'config_cc': config_compiler.config_cc, + 'config_fc': config_compiler.config_fc, + 'config': config.config, + 'build_ext': build_ext.build_ext, + 'build_py': build_py.build_py, + 'build_clib': build_clib.build_clib, + 'sdist': sdist.sdist, + 'install_data': install_data.install_data, + 'install_headers': install_headers.install_headers, + 'install_clib': install_clib.install_clib, + 'install': install.install, + 'bdist_rpm': bdist_rpm.bdist_rpm, + } +if have_setuptools: + # Use our own versions of develop and egg_info to ensure that build_src is + # handled appropriately. 
+ from numpy.distutils.command import develop, egg_info + numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg + numpy_cmdclass['develop'] = develop.develop + numpy_cmdclass['easy_install'] = easy_install.easy_install + numpy_cmdclass['egg_info'] = egg_info.egg_info + +def _dict_append(d, **kws): + for k, v in kws.items(): + if k not in d: + d[k] = v + continue + dv = d[k] + if isinstance(dv, tuple): + d[k] = dv + tuple(v) + elif isinstance(dv, list): + d[k] = dv + list(v) + elif isinstance(dv, dict): + _dict_append(dv, **v) + elif is_string(dv): + d[k] = dv + v + else: + raise TypeError(repr(type(dv))) + +def _command_line_ok(_cache=None): + """ Return True if command line does not contain any + help or display requests. + """ + if _cache: + return _cache[0] + elif _cache is None: + _cache = [] + ok = True + display_opts = ['--'+n for n in Distribution.display_option_names] + for o in Distribution.display_options: + if o[1]: + display_opts.append('-'+o[1]) + for arg in sys.argv: + if arg.startswith('--help') or arg=='-h' or arg in display_opts: + ok = False + break + _cache.append(ok) + return ok + +def get_distribution(always=False): + dist = distutils.core._setup_distribution + # XXX Hack to get numpy installable with easy_install. + # The problem is easy_install runs it's own setup(), which + # sets up distutils.core._setup_distribution. However, + # when our setup() runs, that gets overwritten and lost. + # We can't use isinstance, as the DistributionWithoutHelpCommands + # class is local to a function in setuptools.command.easy_install + if dist is not None and \ + 'DistributionWithoutHelpCommands' in repr(dist): + dist = None + if always and dist is None: + dist = NumpyDistribution() + return dist + +def setup(**attr): + + cmdclass = numpy_cmdclass.copy() + + new_attr = attr.copy() + if 'cmdclass' in new_attr: + cmdclass.update(new_attr['cmdclass']) + new_attr['cmdclass'] = cmdclass + + if 'configuration' in new_attr: + # To avoid calling configuration if there are any errors + # or help request in command in the line. 
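+        # A 'configuration' value is typically a callable along these
+        # lines (package and source names below are hypothetical):
+        #
+        #     def configuration(parent_package='', top_path=None):
+        #         from numpy.distutils.misc_util import Configuration
+        #         config = Configuration('mypkg', parent_package, top_path)
+        #         config.add_extension('spam', sources=['spammodule.c'])
+        #         return config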
+ configuration = new_attr.pop('configuration') + + old_dist = distutils.core._setup_distribution + old_stop = distutils.core._setup_stop_after + distutils.core._setup_distribution = None + distutils.core._setup_stop_after = "commandline" + try: + dist = setup(**new_attr) + finally: + distutils.core._setup_distribution = old_dist + distutils.core._setup_stop_after = old_stop + if dist.help or not _command_line_ok(): + # probably displayed help, skip running any commands + return dist + + # create setup dictionary and append to new_attr + config = configuration() + if hasattr(config, 'todict'): + config = config.todict() + _dict_append(new_attr, **config) + + # Move extension source libraries to libraries + libraries = [] + for ext in new_attr.get('ext_modules', []): + new_libraries = [] + for item in ext.libraries: + if is_sequence(item): + lib_name, build_info = item + _check_append_ext_library(libraries, lib_name, build_info) + new_libraries.append(lib_name) + elif is_string(item): + new_libraries.append(item) + else: + raise TypeError("invalid description of extension module " + "library %r" % (item,)) + ext.libraries = new_libraries + if libraries: + if 'libraries' not in new_attr: + new_attr['libraries'] = [] + for item in libraries: + _check_append_library(new_attr['libraries'], item) + + # sources in ext_modules or libraries may contain header files + if ('ext_modules' in new_attr or 'libraries' in new_attr) \ + and 'headers' not in new_attr: + new_attr['headers'] = [] + + # Use our custom NumpyDistribution class instead of distutils' one + new_attr['distclass'] = NumpyDistribution + + return old_setup(**new_attr) + +def _check_append_library(libraries, item): + for libitem in libraries: + if is_sequence(libitem): + if is_sequence(item): + if item[0]==libitem[0]: + if item[1] is libitem[1]: + return + warnings.warn("[0] libraries list contains %r with" + " different build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem[0]: + warnings.warn("[1] libraries list contains %r with" + " no build_info" % (item[0],), + stacklevel=2) + break + else: + if is_sequence(item): + if item[0]==libitem: + warnings.warn("[2] libraries list contains %r with" + " no build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem: + return + libraries.append(item) + +def _check_append_ext_library(libraries, lib_name, build_info): + for item in libraries: + if is_sequence(item): + if item[0]==lib_name: + if item[1] is build_info: + return + warnings.warn("[3] libraries list contains %r with" + " different build_info" % (lib_name,), + stacklevel=2) + break + elif item==lib_name: + warnings.warn("[4] libraries list contains %r with" + " no build_info" % (lib_name,), + stacklevel=2) + break + libraries.append((lib_name, build_info)) diff --git a/phivenv/Lib/site-packages/numpy/distutils/cpuinfo.py b/phivenv/Lib/site-packages/numpy/distutils/cpuinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..f47e2ad7937755f5dddcdefb2d4aae1f546b3f3b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/cpuinfo.py @@ -0,0 +1,683 @@ +#!/usr/bin/env python3 +""" +cpuinfo + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+Pearu Peterson + +""" +__all__ = ['cpu'] + +import os +import platform +import re +import sys +import types +import warnings + +from subprocess import getstatusoutput + + +def getoutput(cmd, successful_status=(0,), stacklevel=1): + try: + status, output = getstatusoutput(cmd) + except OSError as e: + warnings.warn(str(e), UserWarning, stacklevel=stacklevel) + return False, "" + if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: + return True, output + return False, output + +def command_info(successful_status=(0,), stacklevel=1, **kw): + info = {} + for key in kw: + ok, output = getoutput(kw[key], successful_status=successful_status, + stacklevel=stacklevel+1) + if ok: + info[key] = output.strip() + return info + +def command_by_line(cmd, successful_status=(0,), stacklevel=1): + ok, output = getoutput(cmd, successful_status=successful_status, + stacklevel=stacklevel+1) + if not ok: + return + for line in output.splitlines(): + yield line.strip() + +def key_value_from_command(cmd, sep, successful_status=(0,), + stacklevel=1): + d = {} + for line in command_by_line(cmd, successful_status=successful_status, + stacklevel=stacklevel+1): + l = [s.strip() for s in line.split(sep, 1)] + if len(l) == 2: + d[l[0]] = l[1] + return d + +class CPUInfoBase: + """Holds CPU information and provides methods for requiring + the availability of various CPU features. + """ + + def _try_call(self, func): + try: + return func() + except Exception: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_'+name): + attr = getattr(self, '_'+name) + if isinstance(attr, types.MethodType): + return lambda func=self._try_call,attr=attr : func(attr) + else: + return lambda : None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile(r'(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + +class LinuxCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = [ {} ] + ok, output = getoutput('uname -m') + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except OSError as e: + warnings.warn(str(e), UserWarning, stacklevel=2) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id']=='AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model 
name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu']=='Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id']=='GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu']=='i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return re.match(r'.*?PentiumPro\b', + self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return (self.is_Intel() + and (self.info[0]['cpu family'] == '6' + or self.info[0]['cpu family'] == '15') + and (self.has_sse3() and not self.has_ssse3()) + and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) + + def _is_Core2(self): + return (self.is_64bit() and self.is_Intel() and + re.match(r'.*?Core\(TM\)2\b', + self.info[0]['model name']) is not None) + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug']=='yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug']=='yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + +class 
IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + def _is_r2000(self): return self.__cputype(2000) + def _is_r3000(self): return self.__cputype(3000) + def _is_r3900(self): return self.__cputype(3900) + def _is_r4000(self): return self.__cputype(4000) + def _is_r4100(self): return self.__cputype(4100) + def _is_r4300(self): return self.__cputype(4300) + def _is_r4400(self): return self.__cputype(4400) + def _is_r4600(self): return self.__cputype(4600) + def _is_r4650(self): return self.__cputype(4650) + def _is_r5000(self): return self.__cputype(5000) + def _is_r6000(self): return self.__cputype(6000) + def _is_r8000(self): return self.__cputype(8000) + def _is_r10000(self): return self.__cputype(10000) + def _is_r12000(self): return self.__cputype(12000) + def _is_rorion(self): return self.__cputype('orion') + + def get_ip(self): + try: return self.info.get('MACHINE') + except Exception: pass + def __machine(self, n): + return self.info.get('MACHINE').lower() == 'ip%s' % (n) + def _is_IP19(self): return self.__machine(19) + def _is_IP20(self): return self.__machine(20) + def _is_IP21(self): return self.__machine(21) + def _is_IP22(self): return self.__machine(22) + def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() + def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() + def _is_IP24(self): return self.__machine(24) + def _is_IP25(self): return self.__machine(25) + def _is_IP26(self): return self.__machine(26) + def _is_IP27(self): return self.__machine(27) + def _is_IP28(self): return self.__machine(28) + def _is_IP30(self): return self.__machine(30) + def _is_IP32(self): return self.__machine(32) + def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() + def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' + + def _is_i386(self): + return self.info['arch']=='i386' + def _is_ppc(self): + return self.info['arch']=='ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s'%n + def _is_ppc601(self): return self.__machine(601) + def _is_ppc602(self): return self.__machine(602) + def _is_ppc603(self): return self.__machine(603) + def _is_ppc603e(self): return self.__machine('603e') + def _is_ppc604(self): return self.__machine(604) + def _is_ppc604e(self): return self.__machine('604e') + def _is_ppc620(self): return self.__machine(620) + def _is_ppc630(self): return self.__machine(630) + def _is_ppc740(self): return self.__machine(740) + def _is_ppc7400(self): return self.__machine(7400) + def _is_ppc7450(self): return self.__machine(7450) + def _is_ppc750(self): return self.__machine(750) + def 
_is_ppc403(self): return self.__machine(403) + def _is_ppc505(self): return self.__machine(505) + def _is_ppc801(self): return self.__machine(801) + def _is_ppc821(self): return self.__machine(821) + def _is_ppc823(self): return self.__machine(823) + def _is_ppc860(self): return self.__machine(860) + + +class SunOSCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i='uname_i', + isainfo_b='isainfo -b', + isainfo_n='isainfo -n', + ) + info['uname_X'] = key_value_from_command('uname -X', sep='=') + for line in command_by_line('psrinfo -v 0'): + m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): pass + + def _is_i386(self): + return self.info['isainfo_n']=='i386' + def _is_sparc(self): + return self.info['isainfo_n']=='sparc' + def _is_sparcv9(self): + return self.info['isainfo_n']=='sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch']=='sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor']=='sparcv7' + def _is_cpusparcv8(self): + return self.info['processor']=='sparcv8' + def _is_cpusparcv9(self): + return self.info['processor']=='sparcv9' + +class Win32CPUInfo(CPUInfoBase): + + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
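+            # Descriptive note: the loop below walks the registry tree
+            # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor,
+            # which has one numbered subkey per logical processor; each
+            # subkey's "Identifier" value has the well-known form
+            #     "x86 Family 6 Model 15 Stepping 11"
+            # from which Family/Model/Stepping are parsed.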
+            import winreg
+
+            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+            chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
+            pnum=0
+            while True:
+                try:
+                    proc=winreg.EnumKey(chnd, pnum)
+                except winreg.error:
+                    break
+                else:
+                    pnum+=1
+                    info.append({"Processor":proc})
+                    phnd=winreg.OpenKey(chnd, proc)
+                    pidx=0
+                    while True:
+                        try:
+                            name, value, vtpe=winreg.EnumValue(phnd, pidx)
+                        except winreg.error:
+                            break
+                        else:
+                            pidx=pidx+1
+                            info[-1][name]=value
+                            if name=="Identifier":
+                                srch=prgx.search(value)
+                                if srch:
+                                    info[-1]["Family"]=int(srch.group("FML"))
+                                    info[-1]["Model"]=int(srch.group("MDL"))
+                                    info[-1]["Stepping"]=int(srch.group("STP"))
+        except Exception as e:
+            print(e, '(ignoring)')
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    # Athlon
+
+    def _is_AMD(self):
+        return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+    def _is_Am486(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_Am5x86(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_AMDK5(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [0, 1, 2, 3]
+
+    def _is_AMDK6(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [6, 7]
+
+    def _is_AMDK6_2(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==8
+
+    def _is_AMDK6_3(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==9
+
+    def _is_AMDK7(self):
+        return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+    def _is_AMD64(self):
+        return self.is_AMD() and self.info[0]['Family'] == 15
+
+    # Intel
+
+    def _is_Intel(self):
+        return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+    def _is_i386(self):
+        return self.info[0]['Family']==3
+
+    def _is_i486(self):
+        return self.info[0]['Family']==4
+
+    def _is_i586(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_i686(self):
+        return self.is_Intel() and self.info[0]['Family']==6
+
+    def _is_Pentium(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_PentiumMMX(self):
+        return self.is_Intel() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==4
+
+    def _is_PentiumPro(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model']==1
+
+    def _is_PentiumII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [3, 5, 6]
+
+    def _is_PentiumIII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [7, 8, 9, 10, 11]
+
+    def _is_PentiumIV(self):
+        return self.is_Intel() and self.info[0]['Family']==15
+
+    def _is_PentiumM(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [9, 13, 14]
+
+    def _is_Core2(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [15, 16, 17]
+
+    # Varia
+
+    def _is_singleCPU(self):
+        return len(self.info) == 1
+
+    def _getNCPUs(self):
+        return len(self.info)
+
+    def _has_mmx(self):
+        if self.is_Intel():
+            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+                   or (self.info[0]['Family'] in [6, 15])
+        elif self.is_AMD():
+            return self.info[0]['Family'] in [5, 6, 15]
+        else:
+            return False
+
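+    # Descriptive note: the registry exposes no CPUID feature flags, so the
+    # _has_* checks below infer instruction-set support from the
+    # Family/Model values parsed above; e.g. _has_mmx() reports True for
+    # Intel family 6/15 parts (or the family 5, model 4 Pentium MMX) and
+    # for AMD families 5, 6 and 15.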
+    def _has_sse(self):
+        if self.is_Intel():
+            return ((self.info[0]['Family']==6 and
+                     self.info[0]['Model'] in [7, 8, 9, 10, 11])
+                    or self.info[0]['Family']==15)
+        elif self.is_AMD():
+            return ((self.info[0]['Family']==6 and
+                     self.info[0]['Model'] in [6, 7, 8, 10])
+                    or self.info[0]['Family']==15)
+        else:
+            return False
+
+    def _has_sse2(self):
+        if self.is_Intel():
+            return self.is_PentiumIV() or self.is_PentiumM() \
+                   or self.is_Core2()
+        elif self.is_AMD():
+            return self.is_AMD64()
+        else:
+            return False
+
+    def _has_3dnow(self):
+        return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
+
+    def _has_3dnowext(self):
+        return self.is_AMD() and self.info[0]['Family'] in [6, 15]
+
+if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
+    cpuinfo = LinuxCPUInfo
+elif sys.platform.startswith('irix'):
+    cpuinfo = IRIXCPUInfo
+elif sys.platform == 'darwin':
+    cpuinfo = DarwinCPUInfo
+elif sys.platform.startswith('sunos'):
+    cpuinfo = SunOSCPUInfo
+elif sys.platform.startswith('win32'):
+    cpuinfo = Win32CPUInfo
+elif sys.platform.startswith('cygwin'):
+    cpuinfo = LinuxCPUInfo
+#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
+else:
+    cpuinfo = CPUInfoBase
+
+cpu = cpuinfo()
+
+#if __name__ == "__main__":
+#
+#    cpu.is_blaa()
+#    cpu.is_Intel()
+#    cpu.is_Alpha()
+#
+#    print('CPU information:'),
+#    for name in dir(cpuinfo):
+#        if name[0]=='_' and name[1]!='_':
+#            r = getattr(cpu,name[1:])()
+#            if r:
+#                if r!=1:
+#                    print('%s=%s' %(name[1:],r))
+#                else:
+#                    print(name[1:]),
+#    print()
diff --git a/phivenv/Lib/site-packages/numpy/distutils/exec_command.py b/phivenv/Lib/site-packages/numpy/distutils/exec_command.py
new file mode 100644
index 0000000000000000000000000000000000000000..7369721ca1d0c8b1992ec04d4469aa9b10650ded
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/distutils/exec_command.py
@@ -0,0 +1,315 @@
+"""
+exec_command
+
+Implements exec_command function that is (almost) equivalent to
+commands.getstatusoutput function but on NT, DOS systems the
+returned status is actually correct (though, the returned status
+values may be different by a factor). In addition, exec_command
+takes keyword arguments for (re-)defining environment variables.
+
+Provides functions:
+
+  exec_command    --- execute command in a specified directory and
+                      in the modified environment.
+  find_executable --- locate a command using info from environment
+                      variable PATH. Equivalent to posix `which`
+                      command.
+
+Author: Pearu Peterson
+Created: 11 January 2003
+
+Requires: Python 2.x
+
+Successfully tested on:
+
+========  ============  =================================================
+os.name   sys.platform  comments
+========  ============  =================================================
+posix     linux2        Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
+                        PyCrust 0.9.3, Idle 1.0.2
+posix     linux2        Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
+posix     sunos5        SunOS 5.9, Python 2.2, 2.3.2
+posix     darwin        Darwin 7.2.0, Python 2.3
+nt        win32         Windows Me
+                        Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
+                        Python 2.1.1 Idle 0.8
+nt        win32         Windows 98, Python 2.1.1. Idle 0.8
+nt        win32         Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
+                        fail i.e. redefining environment variables may
+                        not work. FIXED: don't use cygwin echo!
+                        Comment: also `cmd /c echo` will not work
+                        but redefining environment variables do work.
+posix     cygwin        Cygwin 98-4.10, Python 2.3.3(cygming special)
+nt        win32         Windows XP, Python 2.3.3
+========  ============  =================================================
+
+Known bugs:
+
+* Tests that send messages to stderr fail when executed from MSYS prompt
+  because the messages are lost at some point.
+
+"""
+__all__ = ['exec_command', 'find_executable']
+
+import os
+import sys
+import subprocess
+import locale
+import warnings
+
+from numpy.distutils.misc_util import is_sequence, make_temp_file
+from numpy.distutils import log
+
+def filepath_from_subprocess_output(output):
+    """
+    Convert `bytes` in the encoding used by a subprocess into a
+    filesystem-appropriate `str`.
+
+    Inherited from `exec_command`, and possibly incorrect.
+    """
+    mylocale = locale.getpreferredencoding(False)
+    if mylocale is None:
+        mylocale = 'ascii'
+    output = output.decode(mylocale, errors='replace')
+    output = output.replace('\r\n', '\n')
+    # Another historical oddity
+    if output[-1:] == '\n':
+        output = output[:-1]
+    return output
+
+
+def forward_bytes_to_stdout(val):
+    """
+    Forward bytes from a subprocess call to the console, without attempting to
+    decode them.
+
+    The assumption is that the subprocess call already returned bytes in
+    a suitable encoding.
+    """
+    if hasattr(sys.stdout, 'buffer'):
+        # use the underlying binary output if there is one
+        sys.stdout.buffer.write(val)
+    elif hasattr(sys.stdout, 'encoding'):
+        # round-trip the encoding if necessary
+        sys.stdout.write(val.decode(sys.stdout.encoding))
+    else:
+        # make a best-guess at the encoding
+        sys.stdout.write(val.decode('utf8', errors='replace'))
+
+
+def temp_file_name():
+    # 2019-01-30, 1.17
+    warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
+                  'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
+    fo, name = make_temp_file()
+    fo.close()
+    return name
+
+def get_pythonexe():
+    pythonexe = sys.executable
+    if os.name in ['nt', 'dos']:
+        fdir, fn = os.path.split(pythonexe)
+        fn = fn.upper().replace('PYTHONW', 'PYTHON')
+        pythonexe = os.path.join(fdir, fn)
+        assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
+    return pythonexe
+
+def find_executable(exe, path=None, _cache={}):
+    """Return full path of an executable or None.
+
+    Symbolic links are not followed.
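+
+    A usage sketch; the exact result depends on the host's PATH::
+
+        >>> find_executable('sh')                    # doctest: +SKIP
+        '/bin/sh'
+        >>> find_executable('no-such-tool') is None  # doctest: +SKIP
+        True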
+ """ + key = exe, path + try: + return _cache[key] + except KeyError: + pass + log.debug('find_executable(%r)' % exe) + orig_exe = exe + + if path is None: + path = os.environ.get('PATH', os.defpath) + if os.name=='posix': + realpath = os.path.realpath + else: + realpath = lambda a:a + + if exe.startswith('"'): + exe = exe[1:-1] + + suffixes = [''] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] + if ext.lower() not in extra_suffixes: + suffixes = extra_suffixes + + if os.path.isabs(exe): + paths = [''] + else: + paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] + + for path in paths: + fn = os.path.join(path, exe) + for s in suffixes: + f_ext = fn+s + if not os.path.islink(f_ext): + f_ext = realpath(f_ext) + if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): + log.info('Found executable %s' % f_ext) + _cache[key] = f_ext + return f_ext + + log.warn('Could not locate executable %s' % orig_exe) + return None + +############################################################ + +def _preserve_environment( names ): + log.debug('_preserve_environment(%r)' % (names)) + env = {name: os.environ.get(name) for name in names} + return env + +def _update_environment( **env ): + log.debug('_update_environment(...)') + for name, value in env.items(): + os.environ[name] = value or '' + +def exec_command(command, execute_in='', use_shell=None, use_tee=None, + _with_python = 1, **env ): + """ + Return (status,output) of executed command. + + .. deprecated:: 1.17 + Use subprocess.Popen instead + + Parameters + ---------- + command : str + A concatenated string of executable and arguments. + execute_in : str + Before running command ``cd execute_in`` and after ``cd -``. + use_shell : {bool, None}, optional + If True, execute ``sh -c command``. Default None (True) + use_tee : {bool, None}, optional + If True use tee. Default None (True) + + + Returns + ------- + res : str + Both stdout and stderr messages. + + Notes + ----- + On NT, DOS systems the returned status is correct for external commands. + Wild cards will not work for non-posix systems or when use_shell=0. + + """ + # 2019-01-30, 1.17 + warnings.warn('exec_command is deprecated since NumPy v1.17, use ' + 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) + log.debug('exec_command(%r,%s)' % (command, + ','.join(['%s=%r'%kv for kv in env.items()]))) + + if use_tee is None: + use_tee = os.name=='posix' + if use_shell is None: + use_shell = os.name=='posix' + execute_in = os.path.abspath(execute_in) + oldcwd = os.path.abspath(os.getcwd()) + + if __name__[-12:] == 'exec_command': + exec_dir = os.path.dirname(os.path.abspath(__file__)) + elif os.path.isfile('exec_command.py'): + exec_dir = os.path.abspath('.') + else: + exec_dir = os.path.abspath(sys.argv[0]) + if os.path.isfile(exec_dir): + exec_dir = os.path.dirname(exec_dir) + + if oldcwd!=execute_in: + os.chdir(execute_in) + log.debug('New cwd: %s' % execute_in) + else: + log.debug('Retaining cwd: %s' % oldcwd) + + oldenv = _preserve_environment( list(env.keys()) ) + _update_environment( **env ) + + try: + st = _exec_command(command, + use_shell=use_shell, + use_tee=use_tee, + **env) + finally: + if oldcwd!=execute_in: + os.chdir(oldcwd) + log.debug('Restored cwd to %s' % oldcwd) + _update_environment(**oldenv) + + return st + + +def _exec_command(command, use_shell=None, use_tee = None, **env): + """ + Internal workhorse for exec_command(). 
+ """ + if use_shell is None: + use_shell = os.name=='posix' + if use_tee is None: + use_tee = os.name=='posix' + + if os.name == 'posix' and use_shell: + # On POSIX, subprocess always uses /bin/sh, override + sh = os.environ.get('SHELL', '/bin/sh') + if is_sequence(command): + command = [sh, '-c', ' '.join(command)] + else: + command = [sh, '-c', command] + use_shell = False + + elif os.name == 'nt' and is_sequence(command): + # On Windows, join the string for CreateProcess() ourselves as + # subprocess does it a bit differently + command = ' '.join(_quote_arg(arg) for arg in command) + + # Inherit environment by default + env = env or None + try: + # text is set to False so that communicate() + # will return bytes. We need to decode the output ourselves + # so that Python will not raise a UnicodeDecodeError when + # it encounters an invalid character; rather, we simply replace it + proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + except OSError: + # Return 127, as os.spawn*() and /bin/sh do + return 127, '' + + text, err = proc.communicate() + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + text = text.decode(mylocale, errors='replace') + text = text.replace('\r\n', '\n') + # Another historical oddity + if text[-1:] == '\n': + text = text[:-1] + + if use_tee and text: + print(text) + return proc.returncode, text + + +def _quote_arg(arg): + """ + Quote the argument for safe use in a shell command line. + """ + # If there is a quote in the string, assume relevants parts of the + # string are already quoted (e.g. '-I"C:\\Program Files\\..."') + if '"' not in arg and ' ' in arg: + return '"%s"' % arg + return arg + +############################################################ diff --git a/phivenv/Lib/site-packages/numpy/distutils/extension.py b/phivenv/Lib/site-packages/numpy/distutils/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..73b2437ca757869c034e36d4bce3d2948a8befc2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/extension.py @@ -0,0 +1,107 @@ +"""distutils.extension + +Provides the Extension class, used to describe C/C++ extension +modules in setup scripts. + +Overridden to support f2py. + +""" +import re +from distutils.extension import Extension as old_Extension + + +cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + + +class Extension(old_Extension): + """ + Parameters + ---------- + name : str + Extension name. + sources : list of str + List of source file locations relative to the top directory of + the package. + extra_compile_args : list of str + Extra command line arguments to pass to the compiler. + extra_f77_compile_args : list of str + Extra command line arguments to pass to the fortran77 compiler. + extra_f90_compile_args : list of str + Extra command line arguments to pass to the fortran90 compiler. 
+ """ + def __init__( + self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts=None, + depends=None, + language=None, + f2py_options=None, + module_dirs=None, + extra_c_compile_args=None, + extra_cxx_compile_args=None, + extra_f77_compile_args=None, + extra_f90_compile_args=None,): + + old_Extension.__init__( + self, name, [], + include_dirs=include_dirs, + define_macros=define_macros, + undef_macros=undef_macros, + library_dirs=library_dirs, + libraries=libraries, + runtime_library_dirs=runtime_library_dirs, + extra_objects=extra_objects, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + export_symbols=export_symbols) + + # Avoid assert statements checking that sources contains strings: + self.sources = sources + + # Python 2.4 distutils new features + self.swig_opts = swig_opts or [] + # swig_opts is assumed to be a list. Here we handle the case where it + # is specified as a string instead. + if isinstance(self.swig_opts, str): + import warnings + msg = "swig_opts is specified as a string instead of a list" + warnings.warn(msg, SyntaxWarning, stacklevel=2) + self.swig_opts = self.swig_opts.split() + + # Python 2.3 distutils new features + self.depends = depends or [] + self.language = language + + # numpy_distutils features + self.f2py_options = f2py_options or [] + self.module_dirs = module_dirs or [] + self.extra_c_compile_args = extra_c_compile_args or [] + self.extra_cxx_compile_args = extra_cxx_compile_args or [] + self.extra_f77_compile_args = extra_f77_compile_args or [] + self.extra_f90_compile_args = extra_f90_compile_args or [] + + return + + def has_cxx_sources(self): + for source in self.sources: + if cxx_ext_re(str(source)): + return True + return False + + def has_f2py_sources(self): + for source in self.sources: + if fortran_pyf_ext_re(source): + return True + return False + +# class Extension diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__init__.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0bbb748039a94172a5267aa52aa7a2adb8926ea6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__init__.py @@ -0,0 +1,1035 @@ +"""numpy.distutils.fcompiler + +Contains FCompiler, an abstract base class that defines the interface +for the numpy.distutils Fortran compiler abstraction model. + +Terminology: + +To be consistent, where the term 'executable' is used, it means the single +file, like 'gcc', that is executed, and should be a string. In contrast, +'command' means the entire command line, like ['gcc', '-c', 'file.c'], and +should be a list. + +But note that FCompiler.executables is actually a dictionary of commands. 
+ +""" +__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', + 'dummy_fortran_file'] + +import os +import sys +import re +from pathlib import Path + +from distutils.sysconfig import get_python_lib +from distutils.fancy_getopt import FancyGetopt +from distutils.errors import DistutilsModuleError, \ + DistutilsExecError, CompileError, LinkError, DistutilsPlatformError +from distutils.util import split_quoted, strtobool + +from numpy.distutils.ccompiler import CCompiler, gen_lib_options +from numpy.distutils import log +from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ + make_temp_file, get_shared_lib_extension +from numpy.distutils.exec_command import find_executable +from numpy.distutils import _shell_utils + +from .environment import EnvironmentConfig + +__metaclass__ = type + + +FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +class CompilerNotFound(Exception): + pass + +def flaglist(s): + if is_string(s): + return split_quoted(s) + else: + return s + +def str2bool(s): + if is_string(s): + return strtobool(s) + return bool(s) + +def is_sequence_of_strings(seq): + return is_sequence(seq) and all_strings(seq) + +class FCompiler(CCompiler): + """Abstract base class to define the interface that must be implemented + by real Fortran compiler classes. + + Methods that subclasses may redefine: + + update_executables(), find_executables(), get_version() + get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() + get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), + get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), + get_flags_arch_f90(), get_flags_debug_f90(), + get_flags_fix(), get_flags_linker_so() + + DON'T call these methods (except get_version) after + constructing a compiler instance or inside any other method. + All methods, except update_executables() and find_executables(), + may call the get_version() method. + + After constructing a compiler instance, always call customize(dist=None) + method that finalizes compiler construction and makes the following + attributes available: + compiler_f77 + compiler_f90 + compiler_fix + linker_so + archiver + ranlib + libraries + library_dirs + """ + + # These are the environment variables and distutils keys used. + # Each configuration description is + # (, , , , ) + # The hook names are handled by the self._environment_hook method. + # - names starting with 'self.' call methods in this class + # - names starting with 'exe.' return the key in the executables dict + # - names like 'flags.YYY' return self.get_flag_YYY() + # convert is either None or a function to convert a string to the + # appropriate type used. 
+ + distutils_vars = EnvironmentConfig( + distutils_section='config_fc', + noopt = (None, None, 'noopt', str2bool, False), + noarch = (None, None, 'noarch', str2bool, False), + debug = (None, None, 'debug', str2bool, False), + verbose = (None, None, 'verbose', str2bool, False), + ) + + command_vars = EnvironmentConfig( + distutils_section='config_fc', + compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), + compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), + compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), + version_cmd = ('exe.version_cmd', None, None, None, False), + linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), + linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), + archiver = (None, 'AR', 'ar', None, False), + ranlib = (None, 'RANLIB', 'ranlib', None, False), + ) + + flag_vars = EnvironmentConfig( + distutils_section='config_fc', + f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), + f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), + free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), + fix = ('flags.fix', None, None, flaglist, False), + opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), + opt_f77 = ('flags.opt_f77', None, None, flaglist, False), + opt_f90 = ('flags.opt_f90', None, None, flaglist, False), + arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), + arch_f77 = ('flags.arch_f77', None, None, flaglist, False), + arch_f90 = ('flags.arch_f90', None, None, flaglist, False), + debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), + debug_f77 = ('flags.debug_f77', None, None, flaglist, False), + debug_f90 = ('flags.debug_f90', None, None, flaglist, False), + flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), + linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), + linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), + ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), + ) + + language_map = {'.f': 'f77', + '.for': 'f77', + '.F': 'f77', # XXX: needs preprocessor + '.ftn': 'f77', + '.f77': 'f77', + '.f90': 'f90', + '.F90': 'f90', # XXX: needs preprocessor + '.f95': 'f90', + } + language_order = ['f90', 'f77'] + + + # These will be set by the subclass + + compiler_type = None + compiler_aliases = () + version_pattern = None + + possible_executables = [] + executables = { + 'version_cmd': ["f77", "-v"], + 'compiler_f77': ["f77"], + 'compiler_f90': ["f90"], + 'compiler_fix': ["f90", "-fixed"], + 'linker_so': ["f90", "-shared"], + 'linker_exe': ["f90"], + 'archiver': ["ar", "-cr"], + 'ranlib': None, + } + + # If compiler does not support compiling Fortran 90 then it can + # suggest using another compiler. For example, gnu would suggest + # gnu95 compiler type when there are F90 sources. + suggested_f90_compiler = None + + compile_switch = "-c" + object_switch = "-o " # Ending space matters! It will be stripped + # but if it is missing then object_switch + # will be prefixed to object file name by + # string concatenation. + library_switch = "-o " # Ditto! + + # Switch to specify where module files are created and searched + # for USE statement. Normally it is a string and also here ending + # space matters. See above. + module_dir_switch = None + + # Switch to specify where module files are searched for USE statement. 
+ module_include_switch = '-I' + + pic_flags = [] # Flags to create position-independent code + + src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] + obj_extension = ".o" + + shared_lib_extension = get_shared_lib_extension() + static_lib_extension = ".a" # or .lib + static_lib_format = "lib%s%s" # or %s%s + shared_lib_format = "%s%s" + exe_extension = "" + + _exe_cache = {} + + _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', + 'ranlib'] + + # This will be set by new_fcompiler when called in + # command/{build_ext.py, build_clib.py, config.py} files. + c_compiler = None + + # extra_{f77,f90}_compile_args are set by build_ext.build_extension method + extra_f77_compile_args = [] + extra_f90_compile_args = [] + + def __init__(self, *args, **kw): + CCompiler.__init__(self, *args, **kw) + self.distutils_vars = self.distutils_vars.clone(self._environment_hook) + self.command_vars = self.command_vars.clone(self._environment_hook) + self.flag_vars = self.flag_vars.clone(self._environment_hook) + self.executables = self.executables.copy() + for e in self._executable_keys: + if e not in self.executables: + self.executables[e] = None + + # Some methods depend on .customize() being called first, so + # this keeps track of whether that's happened yet. + self._is_customised = False + + def __copy__(self): + obj = self.__new__(self.__class__) + obj.__dict__.update(self.__dict__) + obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) + obj.command_vars = obj.command_vars.clone(obj._environment_hook) + obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) + obj.executables = obj.executables.copy() + return obj + + def copy(self): + return self.__copy__() + + # Use properties for the attributes used by CCompiler. Setting them + # as attributes from the self.executables dictionary is error-prone, + # so we get them from there each time. + def _command_property(key): + def fget(self): + assert self._is_customised + return self.executables[key] + return property(fget=fget) + version_cmd = _command_property('version_cmd') + compiler_f77 = _command_property('compiler_f77') + compiler_f90 = _command_property('compiler_f90') + compiler_fix = _command_property('compiler_fix') + linker_so = _command_property('linker_so') + linker_exe = _command_property('linker_exe') + archiver = _command_property('archiver') + ranlib = _command_property('ranlib') + + # Make our terminology consistent. + def set_executable(self, key, value): + self.set_command(key, value) + + def set_commands(self, **kw): + for k, v in kw.items(): + self.set_command(k, v) + + def set_command(self, key, value): + if not key in self._executable_keys: + raise ValueError( + "unknown executable '%s' for class %s" % + (key, self.__class__.__name__)) + if is_string(value): + value = split_quoted(value) + assert value is None or is_sequence_of_strings(value[1:]), (key, value) + self.executables[key] = value + + ###################################################################### + ## Methods that subclasses may redefine. But don't call these methods! + ## They are private to FCompiler class and may return unexpected + ## results if used elsewhere. So, you have been warned.. + + def find_executables(self): + """Go through the self.executables dictionary, and attempt to + find and assign appropriate executables. 
+
+        Executable names are looked for in the environment (environment
+        variables, the distutils.cfg, and command line), the 0th-element of
+        the command list, and the self.possible_executables list.
+
+        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+        or the Fortran 90 compiler executable is used, unless overridden
+        by an environment setting.
+
+        Subclasses should call this if overridden.
+        """
+        assert self._is_customised
+        exe_cache = self._exe_cache
+        def cached_find_executable(exe):
+            if exe in exe_cache:
+                return exe_cache[exe]
+            fc_exe = find_executable(exe)
+            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+            return fc_exe
+        def verify_command_form(name, value):
+            if value is not None and not is_sequence_of_strings(value):
+                raise ValueError(
+                    "%s value %r is invalid in class %s" %
+                    (name, value, self.__class__.__name__))
+        def set_exe(exe_key, f77=None, f90=None):
+            cmd = self.executables.get(exe_key, None)
+            if not cmd:
+                return None
+            # Note that we get cmd[0] here if the environment doesn't
+            # have anything set
+            exe_from_environ = getattr(self.command_vars, exe_key)
+            if not exe_from_environ:
+                possibles = [f90, f77] + self.possible_executables
+            else:
+                possibles = [exe_from_environ] + self.possible_executables
+
+            seen = set()
+            unique_possibles = []
+            for e in possibles:
+                if e == '<F77>':
+                    e = f77
+                elif e == '<F90>':
+                    e = f90
+                if not e or e in seen:
+                    continue
+                seen.add(e)
+                unique_possibles.append(e)
+
+            for exe in unique_possibles:
+                fc_exe = cached_find_executable(exe)
+                if fc_exe:
+                    cmd[0] = fc_exe
+                    return fc_exe
+            self.set_command(exe_key, None)
+            return None
+
+        ctype = self.compiler_type
+        f90 = set_exe('compiler_f90')
+        if not f90:
+            f77 = set_exe('compiler_f77')
+            if f77:
+                log.warn('%s: no Fortran 90 compiler found' % ctype)
+            else:
+                raise CompilerNotFound('%s: f90 nor f77' % ctype)
+        else:
+            f77 = set_exe('compiler_f77', f90=f90)
+            if not f77:
+                log.warn('%s: no Fortran 77 compiler found' % ctype)
+            set_exe('compiler_fix', f90=f90)
+
+        set_exe('linker_so', f77=f77, f90=f90)
+        set_exe('linker_exe', f77=f77, f90=f90)
+        set_exe('version_cmd', f77=f77, f90=f90)
+        set_exe('archiver')
+        set_exe('ranlib')
+
+    def update_executables(self):
+        """Called at the beginning of customisation. Subclasses should
+        override this if they need to set up the executables dictionary.
+
+        Note that self.find_executables() is run afterwards, so the
+        self.executables dictionary values can contain <F77> or <F90> as
+        the command, which will be replaced by the found F77 or F90
+        compiler.
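+
+        A minimal override sketch (illustrative only)::
+
+            def update_executables(self):
+                # '<F90>' is replaced by the F90 compiler that
+                # find_executables() locates
+                self.executables['compiler_f90'] = ['<F90>']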
+ """ + pass + + def get_flags(self): + """List of flags common to all compiler types.""" + return [] + self.pic_flags + + def _get_command_flags(self, key): + cmd = self.executables.get(key, None) + if cmd is None: + return [] + return cmd[1:] + + def get_flags_f77(self): + """List of Fortran 77 specific flags.""" + return self._get_command_flags('compiler_f77') + def get_flags_f90(self): + """List of Fortran 90 specific flags.""" + return self._get_command_flags('compiler_f90') + def get_flags_free(self): + """List of Fortran 90 free format specific flags.""" + return [] + def get_flags_fix(self): + """List of Fortran 90 fixed format specific flags.""" + return self._get_command_flags('compiler_fix') + def get_flags_linker_so(self): + """List of linker flags to build a shared library.""" + return self._get_command_flags('linker_so') + def get_flags_linker_exe(self): + """List of linker flags to build an executable.""" + return self._get_command_flags('linker_exe') + def get_flags_ar(self): + """List of archiver flags. """ + return self._get_command_flags('archiver') + def get_flags_opt(self): + """List of architecture independent compiler flags.""" + return [] + def get_flags_arch(self): + """List of architecture dependent compiler flags.""" + return [] + def get_flags_debug(self): + """List of compiler flags to compile with debugging information.""" + return [] + + get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt + get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch + get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug + + def get_libraries(self): + """List of compiler libraries.""" + return self.libraries[:] + def get_library_dirs(self): + """List of compiler library directories.""" + return self.library_dirs[:] + + def get_version(self, force=False, ok_status=[0]): + assert self._is_customised + version = CCompiler.get_version(self, force=force, ok_status=ok_status) + if version is None: + raise CompilerNotFound() + return version + + + ############################################################ + + ## Public methods: + + def customize(self, dist = None): + """Customize Fortran compiler. + + This method gets Fortran compiler specific information from + (i) class definition, (ii) environment, (iii) distutils config + files, and (iv) command line (later overrides earlier). + + This method should be always called after constructing a + compiler instance. But not in __init__ because Distribution + instance is needed for (iii) and (iv). + """ + log.info('customize %s' % (self.__class__.__name__)) + + self._is_customised = True + + self.distutils_vars.use_distribution(dist) + self.command_vars.use_distribution(dist) + self.flag_vars.use_distribution(dist) + + self.update_executables() + + # find_executables takes care of setting the compiler commands, + # version_cmd, linker_so, linker_exe, ar, and ranlib + self.find_executables() + + noopt = self.distutils_vars.get('noopt', False) + noarch = self.distutils_vars.get('noarch', noopt) + debug = self.distutils_vars.get('debug', False) + + f77 = self.command_vars.compiler_f77 + f90 = self.command_vars.compiler_f90 + + f77flags = [] + f90flags = [] + freeflags = [] + fixflags = [] + + if f77: + f77 = _shell_utils.NativeParser.split(f77) + f77flags = self.flag_vars.f77 + if f90: + f90 = _shell_utils.NativeParser.split(f90) + f90flags = self.flag_vars.f90 + freeflags = self.flag_vars.free + # XXX Assuming that free format is default for f90 compiler. 
+ fix = self.command_vars.compiler_fix + # NOTE: this and similar examples are probably just + # excluding --coverage flag when F90 = gfortran --coverage + # instead of putting that flag somewhere more appropriate + # this and similar examples where a Fortran compiler + # environment variable has been customized by CI or a user + # should perhaps eventually be more thoroughly tested and more + # robustly handled + if fix: + fix = _shell_utils.NativeParser.split(fix) + fixflags = self.flag_vars.fix + f90flags + + oflags, aflags, dflags = [], [], [] + # examine get_flags__ for extra flags + # only add them if the method is different from get_flags_ + def get_flags(tag, flags): + # note that self.flag_vars. calls self.get_flags_() + flags.extend(getattr(self.flag_vars, tag)) + this_get = getattr(self, 'get_flags_' + tag) + for name, c, flagvar in [('f77', f77, f77flags), + ('f90', f90, f90flags), + ('f90', fix, fixflags)]: + t = '%s_%s' % (tag, name) + if c and this_get is not getattr(self, 'get_flags_' + t): + flagvar.extend(getattr(self.flag_vars, t)) + if not noopt: + get_flags('opt', oflags) + if not noarch: + get_flags('arch', aflags) + if debug: + get_flags('debug', dflags) + + fflags = self.flag_vars.flags + dflags + oflags + aflags + + if f77: + self.set_commands(compiler_f77=f77+f77flags+fflags) + if f90: + self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) + if fix: + self.set_commands(compiler_fix=fix+fixflags+fflags) + + + #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS + linker_so = self.linker_so + if linker_so: + linker_so_flags = self.flag_vars.linker_so + if sys.platform.startswith('aix'): + python_lib = get_python_lib(standard_lib=1) + ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') + python_exp = os.path.join(python_lib, 'config', 'python.exp') + linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] + if sys.platform.startswith('os400'): + from distutils.sysconfig import get_config_var + python_config = get_config_var('LIBPL') + ld_so_aix = os.path.join(python_config, 'ld_so_aix') + python_exp = os.path.join(python_config, 'python.exp') + linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] + self.set_commands(linker_so=linker_so+linker_so_flags) + + linker_exe = self.linker_exe + if linker_exe: + linker_exe_flags = self.flag_vars.linker_exe + self.set_commands(linker_exe=linker_exe+linker_exe_flags) + + ar = self.command_vars.archiver + if ar: + arflags = self.flag_vars.ar + self.set_commands(archiver=[ar]+arflags) + + self.set_library_dirs(self.get_library_dirs()) + self.set_libraries(self.get_libraries()) + + def dump_properties(self): + """Print out the attributes of a compiler instance.""" + props = [] + for key in list(self.executables.keys()) + \ + ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch']: + if hasattr(self, key): + v = getattr(self, key) + props.append((key, None, '= '+repr(v))) + props.sort() + + pretty_printer = FancyGetopt(props) + for l in pretty_printer.generate_help("%s instance properties:" \ + % (self.__class__.__name__)): + if l[:4]==' --': + l = ' ' + l[4:] + print(l) + + ################### + + def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile 'src' to product 'obj'.""" + src_flags = {} + if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ + and not has_f90_header(src): + flavor = ':f77' + compiler = self.compiler_f77 + src_flags = get_f77flags(src) + extra_compile_args = self.extra_f77_compile_args or [] + elif is_free_format(src): + 
flavor = ':f90' + compiler = self.compiler_f90 + if compiler is None: + raise DistutilsExecError('f90 not supported by %s needed for %s'\ + % (self.__class__.__name__, src)) + extra_compile_args = self.extra_f90_compile_args or [] + else: + flavor = ':fix' + compiler = self.compiler_fix + if compiler is None: + raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ + % (self.__class__.__name__, src)) + extra_compile_args = self.extra_f90_compile_args or [] + if self.object_switch[-1]==' ': + o_args = [self.object_switch.strip(), obj] + else: + o_args = [self.object_switch.strip()+obj] + + assert self.compile_switch.strip() + s_args = [self.compile_switch, src] + + if extra_compile_args: + log.info('extra %s options: %r' \ + % (flavor[1:], ' '.join(extra_compile_args))) + + extra_flags = src_flags.get(self.compiler_type, []) + if extra_flags: + log.info('using compile options from source: %r' \ + % ' '.join(extra_flags)) + + command = compiler + cc_args + extra_flags + s_args + o_args \ + + extra_postargs + extra_compile_args + + display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, + src) + try: + self.spawn(command, display=display) + except DistutilsExecError as e: + msg = str(e) + raise CompileError(msg) from None + + def module_options(self, module_dirs, module_build_dir): + options = [] + if self.module_dir_switch is not None: + if self.module_dir_switch[-1]==' ': + options.extend([self.module_dir_switch.strip(), module_build_dir]) + else: + options.append(self.module_dir_switch.strip()+module_build_dir) + else: + print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) + print('XXX: Fix module_dir_switch for ', self.__class__.__name__) + if self.module_include_switch is not None: + for d in [module_build_dir]+module_dirs: + options.append('%s%s' % (self.module_include_switch, d)) + else: + print('XXX: module_dirs=%r option ignored' % (module_dirs)) + print('XXX: Fix module_include_switch for ', self.__class__.__name__) + return options + + def library_option(self, lib): + return "-l" + lib + def library_dir_option(self, dir): + return "-L" + dir + + def link(self, target_desc, objects, + output_filename, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None): + objects, output_dir = self._fix_object_args(objects, output_dir) + libraries, library_dirs, runtime_library_dirs = \ + self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + + lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, + libraries) + if is_string(output_dir): + output_filename = os.path.join(output_dir, output_filename) + elif output_dir is not None: + raise TypeError("'output_dir' must be a string or None") + + if self._need_link(objects, output_filename): + if self.library_switch[-1]==' ': + o_args = [self.library_switch.strip(), output_filename] + else: + o_args = [self.library_switch.strip()+output_filename] + + if is_string(self.objects): + ld_args = objects + [self.objects] + else: + ld_args = objects + self.objects + ld_args = ld_args + lib_opts + o_args + if debug: + ld_args[:0] = ['-g'] + if extra_preargs: + ld_args[:0] = extra_preargs + if extra_postargs: + ld_args.extend(extra_postargs) + self.mkpath(os.path.dirname(output_filename)) + if target_desc == CCompiler.EXECUTABLE: + linker = self.linker_exe[:] + else: + linker = self.linker_so[:] + command = linker + ld_args + try: + self.spawn(command) + 
except DistutilsExecError as e: + msg = str(e) + raise LinkError(msg) from None + else: + log.debug("skipping %s (up-to-date)", output_filename) + + def _environment_hook(self, name, hook_name): + if hook_name is None: + return None + if is_string(hook_name): + if hook_name.startswith('self.'): + hook_name = hook_name[5:] + hook = getattr(self, hook_name) + return hook() + elif hook_name.startswith('exe.'): + hook_name = hook_name[4:] + var = self.executables[hook_name] + if var: + return var[0] + else: + return None + elif hook_name.startswith('flags.'): + hook_name = hook_name[6:] + hook = getattr(self, 'get_flags_' + hook_name) + return hook() + else: + return hook_name() + + def can_ccompiler_link(self, ccompiler): + """ + Check if the given C compiler can link objects produced by + this compiler. + """ + return True + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + + Parameters + ---------- + objects : list + List of object files to include. + output_dir : str + Output directory to place generated object files. + extra_dll_dir : str + Output directory to place extra DLL files that need to be + included on Windows. + + Returns + ------- + converted_objects : list of str + List of converted object files. + Note that the number of output files is not necessarily + the same as inputs. + + """ + raise NotImplementedError() + + ## class FCompiler + +_default_compilers = ( + # sys.platform mappings + ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', + 'intelvem', 'intelem', 'flang')), + ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), + ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', + 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', + 'pathf95', 'nagfor', 'fujitsu')), + ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', + 'g95', 'pg')), + ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), + ('irix.*', ('mips', 'gnu', 'gnu95',)), + ('aix.*', ('ibm', 'gnu', 'gnu95',)), + # os.name mappings + ('posix', ('gnu', 'gnu95',)), + ('nt', ('gnu', 'gnu95',)), + ('mac', ('gnu95', 'gnu', 'pg')), + ) + +fcompiler_class = None +fcompiler_aliases = None + +def load_all_fcompiler_classes(): + """Cache all the FCompiler classes found in modules in the + numpy.distutils.fcompiler package. + """ + from glob import glob + global fcompiler_class, fcompiler_aliases + if fcompiler_class is not None: + return + pys = os.path.join(os.path.dirname(__file__), '*.py') + fcompiler_class = {} + fcompiler_aliases = {} + for fname in glob(pys): + module_name, ext = os.path.splitext(os.path.basename(fname)) + module_name = 'numpy.distutils.fcompiler.' 
+ module_name + __import__ (module_name) + module = sys.modules[module_name] + if hasattr(module, 'compilers'): + for cname in module.compilers: + klass = getattr(module, cname) + desc = (klass.compiler_type, klass, klass.description) + fcompiler_class[klass.compiler_type] = desc + for alias in klass.compiler_aliases: + if alias in fcompiler_aliases: + raise ValueError("alias %r defined for both %s and %s" + % (alias, klass.__name__, + fcompiler_aliases[alias][1].__name__)) + fcompiler_aliases[alias] = desc + +def _find_existing_fcompiler(compiler_types, + osname=None, platform=None, + requiref90=False, + c_compiler=None): + from numpy.distutils.core import get_distribution + dist = get_distribution(always=True) + for compiler_type in compiler_types: + v = None + try: + c = new_fcompiler(plat=platform, compiler=compiler_type, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if requiref90 and c.compiler_f90 is None: + v = None + new_compiler = c.suggested_f90_compiler + if new_compiler: + log.warn('Trying %r compiler as suggested by %r ' + 'compiler for f90 support.' % (compiler_type, + new_compiler)) + c = new_fcompiler(plat=platform, compiler=new_compiler, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if v is not None: + compiler_type = new_compiler + if requiref90 and c.compiler_f90 is None: + raise ValueError('%s does not support compiling f90 codes, ' + 'skipping.' % (c.__class__.__name__)) + except DistutilsModuleError: + log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) + except CompilerNotFound: + log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) + if v is not None: + return compiler_type + return None + +def available_fcompilers_for_platform(osname=None, platform=None): + if osname is None: + osname = os.name + if platform is None: + platform = sys.platform + matching_compiler_types = [] + for pattern, compiler_type in _default_compilers: + if re.match(pattern, platform) or re.match(pattern, osname): + for ct in compiler_type: + if ct not in matching_compiler_types: + matching_compiler_types.append(ct) + if not matching_compiler_types: + matching_compiler_types.append('gnu') + return matching_compiler_types + +def get_default_fcompiler(osname=None, platform=None, requiref90=False, + c_compiler=None): + """Determine the default Fortran compiler to use for the given + platform.""" + matching_compiler_types = available_fcompilers_for_platform(osname, + platform) + log.info("get_default_fcompiler: matching types: '%s'", + matching_compiler_types) + compiler_type = _find_existing_fcompiler(matching_compiler_types, + osname=osname, + platform=platform, + requiref90=requiref90, + c_compiler=c_compiler) + return compiler_type + +# Flag to avoid rechecking for Fortran compiler every time +failed_fcompilers = set() + +def new_fcompiler(plat=None, + compiler=None, + verbose=0, + dry_run=0, + force=0, + requiref90=False, + c_compiler = None): + """Generate an instance of some FCompiler subclass for the supplied + platform/compiler combination. 
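+
+    A usage sketch (illustrative; 'gnu95' is one of the registered
+    compiler types listed in _default_compilers)::
+
+        fc = new_fcompiler(compiler='gnu95')
+        if fc is not None:   # None if the platform/compiler is unknown
+            fc.customize()
+            print(fc.get_version())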
+ """ + global failed_fcompilers + fcompiler_key = (plat, compiler) + if fcompiler_key in failed_fcompilers: + return None + + load_all_fcompiler_classes() + if plat is None: + plat = os.name + if compiler is None: + compiler = get_default_fcompiler(plat, requiref90=requiref90, + c_compiler=c_compiler) + if compiler in fcompiler_class: + module_name, klass, long_description = fcompiler_class[compiler] + elif compiler in fcompiler_aliases: + module_name, klass, long_description = fcompiler_aliases[compiler] + else: + msg = "don't know how to compile Fortran code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler." % compiler + msg = msg + " Supported compilers are: %s)" \ + % (','.join(fcompiler_class.keys())) + log.warn(msg) + failed_fcompilers.add(fcompiler_key) + return None + + compiler = klass(verbose=verbose, dry_run=dry_run, force=force) + compiler.c_compiler = c_compiler + return compiler + +def show_fcompilers(dist=None): + """Print list of available compilers (used by the "--help-fcompiler" + option to "config_fc"). + """ + if dist is None: + from distutils.dist import Distribution + from numpy.distutils.command.config_compiler import config_fc + dist = Distribution() + dist.script_name = os.path.basename(sys.argv[0]) + dist.script_args = ['config_fc'] + sys.argv[1:] + try: + dist.script_args.remove('--help-fcompiler') + except ValueError: + pass + dist.cmdclass['config_fc'] = config_fc + dist.parse_config_files() + dist.parse_command_line() + compilers = [] + compilers_na = [] + compilers_ni = [] + if not fcompiler_class: + load_all_fcompiler_classes() + platform_compilers = available_fcompilers_for_platform() + for compiler in platform_compilers: + v = None + log.set_verbosity(-2) + try: + c = new_fcompiler(compiler=compiler, verbose=dist.verbose) + c.customize(dist) + v = c.get_version() + except (DistutilsModuleError, CompilerNotFound) as e: + log.debug("show_fcompilers: %s not found" % (compiler,)) + log.debug(repr(e)) + + if v is None: + compilers_na.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2])) + else: + c.dump_properties() + compilers.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2] + ' (%s)' % v)) + + compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) + compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) + for fc in compilers_ni] + + compilers.sort() + compilers_na.sort() + compilers_ni.sort() + pretty_printer = FancyGetopt(compilers) + pretty_printer.print_help("Fortran compilers found:") + pretty_printer = FancyGetopt(compilers_na) + pretty_printer.print_help("Compilers available for this " + "platform, but not found:") + if compilers_ni: + pretty_printer = FancyGetopt(compilers_ni) + pretty_printer.print_help("Compilers not available on this platform:") + print("For compiler details, run 'config_fc --verbose' setup command.") + + +def dummy_fortran_file(): + fo, name = make_temp_file(suffix='.f') + fo.write(" subroutine dummy()\n end\n") + fo.close() + return name[:-2] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match + +def is_free_format(file): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. 
+    result = 0
+    with open(file, encoding='latin1') as f:
+        line = f.readline()
+        n = 10000 # the number of non-comment lines to scan for hints
+        if _has_f_header(line) or _has_fix_header(line):
+            n = 0
+        elif _has_f90_header(line):
+            n = 0
+            result = 1
+        while n>0 and line:
+            line = line.rstrip()
+            if line and line[0]!='!':
+                n -= 1
+                if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+                    result = 1
+                    break
+            line = f.readline()
+    return result
+
+def has_f90_header(src):
+    with open(src, encoding='latin1') as f:
+        line = f.readline()
+    return _has_f90_header(line) or _has_fix_header(line)
+
+_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
+def get_f77flags(src):
+    """
+    Search the first 20 lines of fortran 77 code for line pattern
+    `CF77FLAGS(<fcompiler type>)=<f77 flags>`
+    Return a dictionary {<fcompiler type>:<f77 flags>}.
+    """
+    flags = {}
+    with open(src, encoding='latin1') as f:
+        i = 0
+        for line in f:
+            i += 1
+            if i>20: break
+            m = _f77flags_re.match(line)
+            if not m: continue
+            fcname = m.group('fcname').strip()
+            fflags = m.group('fflags').strip()
+            flags[fcname] = split_quoted(fflags)
+    return flags
+
+# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
+
+if __name__ == '__main__':
+    show_fcompilers()
diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..760b73006fe029b27cfff500cb1d7109e9f47845 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42baceb21ba9ad13cfafbe903b335d4f099de46a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/arm.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/arm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..148e8134a72a4d8aaa7b35e5c1f492ac4d87406e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/arm.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2210bbbc5f6ba425c7d5248c270d641deecf1d88 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6d49540793355053d0b4c8a71a6a111998f7adb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc
new file mode 100644 index 0000000000000000000000000000000000000000..e87de635b852239a972b318fe7eb630bd904a892 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3713ff17cb5344269e3047d00c0a3da44347363d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45da27c22fca1bef1996719018fa99883137421d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26baa91a3c2e6e0a71c10ad59c543f6cd60bcb9f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67a6a12906bb07b6792aef1edb09d2be96c23dfe Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..527cd7bb148cf4441cf7cebf3bf3053670c6a5f2 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78379b8194aaaa5a95b15efa11e1b66e9bcb2f52 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db77347fe566fe158f0b901ae769dc80b1dde8f8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3185c8ab6e4b8dffe15c0620ebbfcf4463a2fbd8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e28b781332646f07705a959e0bfe36df10f081a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7cdffb435b529e02cf0f3452c2587629a6a3e15 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce041cc323463f4a50107b055c94618d06a9c891 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4344309933d2982bea46f077068097dd37b02533 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c96d38ae45d7e5a1de3de559ce9820188d24266 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04336ae42bbfdfa3ddf5a562df46fb9315093e44 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/absoft.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/absoft.py new file mode 100644 index 0000000000000000000000000000000000000000..7035dec1cabe108cb237c790fa17a7c811a88be0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/absoft.py @@ -0,0 +1,156 @@ + +# Absoft Corporation ceased operations on 12/31/2022. +# Thus, all links to <http://www.absoft.com> are invalid.
+ +# Notes: +# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py +# generated extension modules (works for f2py v2.45.241_1936 and up) +import os + +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from numpy.distutils.misc_util import cyg2win32 + +compilers = ['AbsoftFCompiler'] + +class AbsoftFCompiler(FCompiler): + + compiler_type = 'absoft' + description = 'Absoft Corp Fortran Compiler' + #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp' + version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ + r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)' + + # on windows: f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 + + # samt5735(8)$ f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 + # Note that fink installs g77 as f77, so need to use f90 for detection. + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : ["f77"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["<F90>"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + if os.name=='nt': + library_switch = '/out:' #No space after /out:! + + module_dir_switch = None + module_include_switch = '-p' + + def update_executables(self): + f = cyg2win32(dummy_fortran_file()) + self.executables['version_cmd'] = ['<F90>', '-V', '-c', + f+'.f', '-o', f+'.o'] + + def get_flags_linker_so(self): + if os.name=='nt': + opt = ['/dll'] + # The "-K shared" switches are being left in for pre-9.0 versions + # of Absoft though I don't think versions earlier than 9 can + # actually be used to build shared libraries. In fact, version + # 8 of Absoft doesn't recognize "-K shared" and will fail.
+ elif self.get_version() >= '9.0': + opt = ['-shared'] + else: + opt = ["-K", "shared"] + return opt + + def library_dir_option(self, dir): + if os.name=='nt': + return ['-link', '/PATH:%s' % (dir)] + return "-L" + dir + + def library_option(self, lib): + if os.name=='nt': + return '%s.lib' % (lib) + return "-l" + lib + + def get_library_dirs(self): + opt = FCompiler.get_library_dirs(self) + d = os.environ.get('ABSOFT') + if d: + if self.get_version() >= '10.0': + # use shared libraries, the static libraries were not compiled -fPIC + prefix = 'sh' + else: + prefix = '' + if cpu.is_64bit(): + suffix = '64' + else: + suffix = '' + opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) + return opt + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + if self.get_version() >= '11.0': + opt.extend(['af90math', 'afio', 'af77math', 'amisc']) + elif self.get_version() >= '10.0': + opt.extend(['af90math', 'afio', 'af77math', 'U77']) + elif self.get_version() >= '8.0': + opt.extend(['f90math', 'fio', 'f77math', 'U77']) + else: + opt.extend(['fio', 'f90math', 'fmath', 'U77']) + if os.name =='nt': + opt.append('COMDLG32') + return opt + + def get_flags(self): + opt = FCompiler.get_flags(self) + if os.name != 'nt': + opt.extend(['-s']) + if self.get_version(): + if self.get_version()>='8.2': + opt.append('-fpic') + return opt + + def get_flags_f77(self): + opt = FCompiler.get_flags_f77(self) + opt.extend(['-N22', '-N90', '-N110']) + v = self.get_version() + if os.name == 'nt': + if v and v>='8.0': + opt.extend(['-f', '-N15']) + else: + opt.append('-f') + if v: + if v<='4.6': + opt.append('-B108') + else: + # Though -N15 is undocumented, it works with + # Absoft 8.0 on Linux + opt.append('-N15') + return opt + + def get_flags_f90(self): + opt = FCompiler.get_flags_f90(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + if self.get_version(): + if self.get_version()>'4.6': + opt.extend(["-YDEALLOC=ALL"]) + return opt + + def get_flags_fix(self): + opt = FCompiler.get_flags_fix(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + opt.extend(["-f", "fixed"]) + return opt + + def get_flags_opt(self): + opt = ['-O'] + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/arm.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/arm.py new file mode 100644 index 0000000000000000000000000000000000000000..c519d529715bb58e05afb01dbc09186d56875252 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/arm.py @@ -0,0 +1,71 @@ +import sys + +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['ArmFlangCompiler'] + +import functools + +class ArmFlangCompiler(FCompiler): + compiler_type = 'arm' + description = 'Arm Compiler' + version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['armflang'] + + executables = { + 'version_cmd': ["<F90>", "--version"], + 'compiler_f77': ["armflang", "-fPIC"], + 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], + 'compiler_f90': ["armflang", "-fPIC"], + 'linker_so': ["armflang", "-fPIC", "-shared"], + 'archiver': ["ar", "-cr"], +
'ranlib': None + } + + pic_flags = ["-fPIC", "-DPIC"] + c_compiler = 'arm' + module_dir_switch = '-module ' # Don't remove ending space! + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + return '-Wl,-rpath=%s' % dir + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='armflang').get_version()) + diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/compaq.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/compaq.py new file mode 100644 index 0000000000000000000000000000000000000000..6f885ee74c22f3a9552cbcc85f287ed0c1732a0a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/compaq.py @@ -0,0 +1,120 @@ + +#http://www.compaq.com/fortran/docs/ +import os +import sys + +from numpy.distutils.fcompiler import FCompiler +from distutils.errors import DistutilsPlatformError + +compilers = ['CompaqFCompiler'] +if os.name != 'posix' or sys.platform[:6] == 'cygwin' : + # Otherwise we'd get a false positive on posix systems with + # case-insensitive filesystems (like darwin), because we'll pick + # up /bin/df + compilers.append('CompaqVisualFCompiler') + +class CompaqFCompiler(FCompiler): + + compiler_type = 'compaq' + description = 'Compaq Fortran Compiler' + version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*' + + if sys.platform[:5]=='linux': + fc_exe = 'fort' + else: + fc_exe = 'f90' + + executables = { + 'version_cmd' : ['<F90>', "-version"], + 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], + 'compiler_fix' : [fc_exe, "-fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : ['<F90>'], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = '-module ' # not tested + module_include_switch = '-I' + + def get_flags(self): + return ['-assume no2underscore', '-nomixed_str_len_arg'] + def get_flags_debug(self): + return ['-g', '-check bounds'] + def get_flags_opt(self): + return ['-O4', '-align dcommons', '-assume bigarrays', + '-assume nozsize', '-math_library fast'] + def get_flags_arch(self): + return ['-arch host', '-tune host'] + def get_flags_linker_so(self): + if sys.platform[:5]=='linux': + return ['-shared'] + return ['-shared', '-Wl,-expect_unresolved,*'] + +class CompaqVisualFCompiler(FCompiler): + + compiler_type = 'compaqv' + description = 'DIGITAL or Compaq Visual Fortran Compiler' + version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' + r' Version (?P<version>[^\s]*).*') + + compile_switch = '/compile_only' + object_switch = '/object:' + library_switch = '/OUT:' #No space after /OUT:!
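+ # clarifying note (not in upstream numpy): FCompiler builds the link + # command by concatenating a switch that has no trailing space directly + # with its argument, so "/OUT:" here yields a single token such as + # "/OUT:foo.lib" - hence the warning above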
+ + static_lib_extension = ".lib" + static_lib_format = "%s%s" + module_dir_switch = '/module:' + module_include_switch = '/I' + + ar_exe = 'lib.exe' + fc_exe = 'DF' + + if sys.platform=='win32': + from numpy.distutils.msvccompiler import MSVCCompiler + + try: + m = MSVCCompiler() + m.initialize() + ar_exe = m.lib + except DistutilsPlatformError: + pass + except AttributeError as e: + if '_MSVCCompiler__root' in str(e): + print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) + else: + raise + except OSError as e: + if not "vcvarsall.bat" in str(e): + print("Unexpected OSError in", __file__) + raise + except ValueError as e: + if not "'path'" in str(e): + print("Unexpected ValueError in", __file__) + raise + + executables = { + 'version_cmd' : ['<F90>', "/what"], + 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], + 'compiler_fix' : [fc_exe, "/fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : ['<F90>'], + 'archiver' : [ar_exe, "/OUT:"], + 'ranlib' : None + } + + def get_flags(self): + return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', + '/names:lowercase', '/assume:underscore'] + def get_flags_opt(self): + return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] + def get_flags_arch(self): + return ['/threads'] + def get_flags_debug(self): + return ['/debug'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/environment.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..defa2db226d51ecce028ce286e5dbc2bd1bab2a4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/environment.py @@ -0,0 +1,88 @@ +import os +from distutils.dist import Distribution + +__metaclass__ = type + +class EnvironmentConfig: + def __init__(self, distutils_section='ALL', **kw): + self._distutils_section = distutils_section + self._conf_keys = kw + self._conf = None + self._hook_handler = None + + def dump_variable(self, name): + conf_desc = self._conf_keys[name] + hook, envvar, confvar, convert, append = conf_desc + if not convert: + convert = lambda x : x + print('%s.%s:' % (self._distutils_section, name)) + v = self._hook_handler(name, hook) + print(' hook : %s' % (convert(v),)) + if envvar: + v = os.environ.get(envvar, None) + print(' environ: %s' % (convert(v),)) + if confvar and self._conf: + v = self._conf.get(confvar, (None, None))[1] + print(' config : %s' % (convert(v),)) + + def dump_variables(self): + for name in self._conf_keys: + self.dump_variable(name) + + def __getattr__(self, name): + try: + conf_desc = self._conf_keys[name] + except KeyError: + raise AttributeError( + f"'EnvironmentConfig' object has no attribute '{name}'" + ) from None + + return self._get_var(name, conf_desc) + + def get(self, name, default=None): + try: + conf_desc = self._conf_keys[name] + except KeyError: + return default + var = self._get_var(name, conf_desc) + if var is None: + var = default + return var + + def _get_var(self, name, conf_desc): + hook, envvar, confvar, convert, append = conf_desc + if convert is None: + convert = lambda x: x + var = self._hook_handler(name, hook) + if envvar is not None: + envvar_contents = os.environ.get(envvar) + if envvar_contents is not None: + envvar_contents = convert(envvar_contents) + if var and append: + if
os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': + var.extend(envvar_contents) + else: + # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 + # to keep old (overwrite flags rather than append to + # them) behavior + var = envvar_contents + else: + var = envvar_contents + if confvar is not None and self._conf: + if confvar in self._conf: + source, confvar_contents = self._conf[confvar] + var = convert(confvar_contents) + return var + + + def clone(self, hook_handler): + ec = self.__class__(distutils_section=self._distutils_section, + **self._conf_keys) + ec._hook_handler = hook_handler + return ec + + def use_distribution(self, dist): + if isinstance(dist, Distribution): + self._conf = dist.get_option_dict(self._distutils_section) + else: + self._conf = dist diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py new file mode 100644 index 0000000000000000000000000000000000000000..21562509368738567ad339d092155fe40672132e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py @@ -0,0 +1,46 @@ +""" +fujitsu + +Supports Fujitsu compiler function. +This compiler is developed by Fujitsu and is used in A64FX on Fugaku. +""" +from numpy.distutils.fcompiler import FCompiler + +compilers = ['FujitsuFCompiler'] + +class FujitsuFCompiler(FCompiler): + compiler_type = 'fujitsu' + description = 'Fujitsu Fortran Compiler' + + possible_executables = ['frt'] + version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)' + # $ frt --version + # frt (FRT) x.x.x yyyymmdd + + executables = { + 'version_cmd' : ["<F90>", "--version"], + 'compiler_f77' : ["frt", "-Fixed"], + 'compiler_fix' : ["frt", "-Fixed"], + 'compiler_f90' : ["frt"], + 'linker_so' : ["frt", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-KPIC'] + module_dir_switch = '-M' + module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + def runtime_library_dir_option(self, dir): + return f'-Wl,-rpath={dir}' + def get_libraries(self): + return ['fj90f', 'fj90i', 'fjsrcinfo'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('fujitsu').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/g95.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/g95.py new file mode 100644 index 0000000000000000000000000000000000000000..847cb0c04bad6000908d01e7911f8fb9ac76c027 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/g95.py @@ -0,0 +1,42 @@ +# http://g95.sourceforge.net/ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['G95FCompiler'] + +class G95FCompiler(FCompiler): + compiler_type = 'g95' + description = 'G95 Fortran Compiler' + +# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*' + # $ g95 --version + # G95 (GCC 4.0.3 (g95!) May 22 2006) + + version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*' + # $ g95 --version + # G95 (GCC 4.0.3 (g95 0.90!)
Aug 22 2006) + + executables = { + 'version_cmd' : ["<F90>", "--version"], + 'compiler_f77' : ["g95", "-ffixed-form"], + 'compiler_fix' : ["g95", "-ffixed-form"], + 'compiler_f90' : ["g95"], + 'linker_so' : ["<F90>", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fpic'] + module_dir_switch = '-fmod=' + module_include_switch = '-I' + + def get_flags(self): + return ['-fno-second-underscore'] + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('g95').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/gnu.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/gnu.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb46c4b8432e054097b58efc8f4fcf4cc7c1ab9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/gnu.py @@ -0,0 +1,555 @@ +import re +import os +import sys +import warnings +import platform +import tempfile +import hashlib +import base64 +import subprocess +from subprocess import Popen, PIPE, STDOUT +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.fcompiler import FCompiler +from distutils.version import LooseVersion + +compilers = ['GnuFCompiler', 'Gnu95FCompiler'] + +TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") + +# XXX: handle cross compilation + + +def is_win64(): + return sys.platform == "win32" and platform.architecture()[0] == "64bit" + + +class GnuFCompiler(FCompiler): + compiler_type = 'gnu' + compiler_aliases = ('g77', ) + description = 'GNU Fortran 77 compiler' + + def gnu_version_match(self, version_string): + """Handle the different versions of GNU fortran compilers""" + # Strip warning(s) that may be emitted by gfortran + while version_string.startswith('gfortran: warning'): + version_string =\ + version_string[version_string.find('\n') + 1:].strip() + + # Gfortran versions from after 2010 will output a simple string + # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older + # gfortrans may still return long version strings (``-dumpversion`` was + # an alias for ``--version``) + if len(version_string) <= 20: + # Try to find a valid version string + m = re.search(r'([0-9.]+)', version_string) + if m: + # g77 provides a longer version string that starts with GNU + # Fortran + if version_string.startswith('GNU Fortran'): + return ('g77', m.group(1)) + + # gfortran only outputs a version string such as #.#.#, so check + # if the match is at the start of the string + elif m.start() == 0: + return ('gfortran', m.group(1)) + else: + # Output probably from --version, try harder: + m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) + if m: + return ('gfortran', m.group(1)) + m = re.search( + r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) + if m: + v = m.group(1) + if v.startswith('0') or v.startswith('2') or v.startswith('3'): + # the '0' is for early g77's + return ('g77', v) + else: + # at some point in the 4.x series, the ' 95' was dropped + # from the version string + return ('gfortran', v) + + # If still nothing, raise an error to make the problem easy to find.
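+ # (clarifying examples, not in upstream numpy: a modern gfortran's + # -dumpversion prints a short string such as "4.8.5", while old g77 + # printed e.g. "GNU Fortran 0.5.25 20010319 (prerelease)"; a string + # matching neither branch above falls through to the error below)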
+ err = 'A valid Fortran version was not found in this string:\n' + raise ValueError(err + version_string) + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'g77': + return None + return v[1] + + possible_executables = ['g77', 'f77'] + executables = { + 'version_cmd' : [None, "-dumpversion"], + 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], + 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes + 'compiler_fix' : None, + 'linker_so' : [None, "-g", "-Wall"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-g", "-Wall"] + } + module_dir_switch = None + module_include_switch = None + + # Cygwin: f771: warning: -fPIC ignored for target (all code is + # position independent) + if os.name != 'nt' and sys.platform != 'cygwin': + pic_flags = ['-fPIC'] + + # use -mno-cygwin for g77 when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: + executables[key].append('-mno-cygwin') + + g2c = 'g2c' + suggested_f90_compiler = 'gnu95' + + def get_flags_linker_so(self): + opt = self.linker_so[1:] + if sys.platform == 'darwin': + target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let distutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from sysconfig and then + # fall back to setting it to 10.9 This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import sysconfig + target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') + if not target: + target = '10.9' + s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' + warnings.warn(s, stacklevel=2) + os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) + else: + opt.append("-shared") + if sys.platform.startswith('sunos'): + # SunOS often has dynamically loaded symbols defined in the + # static library libg2c.a The linker doesn't like this. To + # ignore the problem, use the -mimpure-text flag. It isn't + # the safest thing, but seems to work. 'man gcc' says: + # ".. Instead of using -mimpure-text, you should compile all + # source code with -fpic or -fPIC." 
+ opt.append('-mimpure-text') + return opt + + def get_libgcc_dir(self): + try: + output = subprocess.check_output(self.compiler_f77 + + ['-print-libgcc-file-name']) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + return os.path.dirname(output) + return None + + def get_libgfortran_dir(self): + if sys.platform[:5] == 'linux': + libgfortran_name = 'libgfortran.so' + elif sys.platform == 'darwin': + libgfortran_name = 'libgfortran.dylib' + else: + libgfortran_name = None + + libgfortran_dir = None + if libgfortran_name: + find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] + try: + output = subprocess.check_output( + self.compiler_f77 + find_lib_arg) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + libgfortran_dir = os.path.dirname(output) + return libgfortran_dir + + def get_library_dirs(self): + opt = [] + if sys.platform[:5] != 'linux': + d = self.get_libgcc_dir() + if d: + # if windows and not cygwin, libg2c lies in a different folder + if sys.platform == 'win32' and not d.startswith('/usr/lib'): + d = os.path.normpath(d) + path = os.path.join(d, "lib%s.a" % self.g2c) + if not os.path.exists(path): + root = os.path.join(d, *((os.pardir, ) * 4)) + d2 = os.path.abspath(os.path.join(root, 'lib')) + path = os.path.join(d2, "lib%s.a" % self.g2c) + if os.path.exists(path): + opt.append(d2) + opt.append(d) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = [] + d = self.get_libgcc_dir() + if d is not None: + g2c = self.g2c + '-pic' + f = self.static_lib_format % (g2c, self.static_lib_extension) + if not os.path.isfile(os.path.join(d, f)): + g2c = self.g2c + else: + g2c = self.g2c + + if g2c is not None: + opt.append(g2c) + c_compiler = self.c_compiler + if sys.platform == 'win32' and c_compiler and \ + c_compiler.compiler_type == 'msvc': + opt.append('gcc') + if sys.platform == 'darwin': + opt.append('cc_dynamic') + return opt + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + v = self.get_version() + if v and v <= '3.3.3': + # With this compiler version building Fortran BLAS/LAPACK + # with -O3 caused failures in lib.lapack heevr,syevr tests. 
+ opt = ['-O2'] + else: + opt = ['-O3'] + opt.append('-funroll-loops') + return opt + + def _c_arch_flags(self): + """ Return detected arch flags from CFLAGS """ + import sysconfig + try: + cflags = sysconfig.get_config_vars()['CFLAGS'] + except KeyError: + return [] + arch_re = re.compile(r"-arch\s+(\w+)") + arch_flags = [] + for arch in arch_re.findall(cflags): + arch_flags += ['-arch', arch] + return arch_flags + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + if sys.platform == 'win32' or sys.platform == 'cygwin': + # Linux/Solaris/Unix support RPATH, Windows does not + raise NotImplementedError + + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + if sys.platform == 'darwin': + return f'-Wl,-rpath,{dir}' + elif sys.platform.startswith(('aix', 'os400')): + # AIX RPATH is called LIBPATH + return f'-Wl,-blibpath:{dir}' + else: + return f'-Wl,-rpath={dir}' + + +class Gnu95FCompiler(GnuFCompiler): + compiler_type = 'gnu95' + compiler_aliases = ('gfortran', ) + description = 'GNU Fortran 95 compiler' + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'gfortran': + return None + v = v[1] + if LooseVersion(v) >= "4": + # gcc-4 series releases do not support -mno-cygwin option + pass + else: + # use -mno-cygwin flag for gfortran when Python is not + # Cygwin-Python + if sys.platform == 'win32': + for key in [ + 'version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe' + ]: + self.executables[key].append('-mno-cygwin') + return v + + possible_executables = ['gfortran', 'f95'] + executables = { + 'version_cmd' : ["<F90>", "-dumpversion"], + 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", + "-fno-second-underscore"], + 'compiler_f90' : [None, "-Wall", "-g", + "-fno-second-underscore"], + 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", + "-fno-second-underscore"], + 'linker_so' : ["<F90>", "-Wall", "-g"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-Wall"] + } + + module_dir_switch = '-J' + module_include_switch = '-I' + + if sys.platform.startswith(('aix', 'os400')): + executables['linker_so'].append('-lpthread') + if platform.architecture()[0][:2] == '64': + for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: + executables[key].append('-maix64') + + g2c = 'gfortran' + + def _universal_flags(self, cmd): + """Return a list of -arch flags for every supported architecture.""" + if not sys.platform == 'darwin': + return [] + arch_flags = [] + # get arches the C compiler gets.
+ c_archs = self._c_arch_flags() + if "i386" in c_archs: + c_archs[c_archs.index("i386")] = "i686" + # check the arches the Fortran compiler supports, and compare with + # arch flags from C compiler + for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: + if _can_target(cmd, arch) and arch in c_archs: + arch_flags.extend(["-arch", arch]) + return arch_flags + + def get_flags(self): + flags = GnuFCompiler.get_flags(self) + arch_flags = self._universal_flags(self.compiler_f90) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_flags_linker_so(self): + flags = GnuFCompiler.get_flags_linker_so(self) + arch_flags = self._universal_flags(self.linker_so) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_library_dirs(self): + opt = GnuFCompiler.get_library_dirs(self) + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: + d = os.path.normpath(self.get_libgcc_dir()) + root = os.path.join(d, *((os.pardir, ) * 4)) + path = os.path.join(root, "lib") + mingwdir = os.path.normpath(path) + if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): + opt.append(mingwdir) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = GnuFCompiler.get_libraries(self) + if sys.platform == 'darwin': + opt.remove('cc_dynamic') + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i + 1, "mingwex") + opt.insert(i + 1, "mingw32") + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + pass + return opt + + def get_target(self): + try: + p = subprocess.Popen( + self.compiler_f77 + ['-v'], + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + output = (stdout or b"") + (stderr or b"") + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + m = TARGET_R.search(output) + if m: + return m.group(1) + return "" + + def _hash_files(self, filenames): + h = hashlib.sha1() + for fn in filenames: + with open(fn, 'rb') as f: + while True: + block = f.read(131072) + if not block: + break + h.update(block) + text = base64.b32encode(h.digest()) + text = text.decode('ascii') + return text.rstrip('=') + + def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, + chained_dlls, is_archive): + """Create a wrapper shared library for the given objects + + Return an MSVC-compatible lib + """ + + c_compiler = self.c_compiler + if c_compiler.compiler_type != "msvc": + raise ValueError("This method only supports MSVC") + + object_hash = self._hash_files(list(objects) + list(chained_dlls)) + + if is_win64(): + tag = 'win_amd64' + else: + tag = 'win32' + + basename = 'lib' + os.path.splitext( + os.path.basename(objects[0]))[0][:8] + root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag + dll_name = root_name + '.dll' + def_name = root_name + '.def' + lib_name = root_name + '.lib' + dll_path = os.path.join(extra_dll_dir, dll_name) + def_path = os.path.join(output_dir, def_name) + lib_path = os.path.join(output_dir, lib_name) + + if os.path.isfile(lib_path): + # Nothing to do + return lib_path, dll_path + + if is_archive: + objects = (["-Wl,--whole-archive"] + list(objects) + + ["-Wl,--no-whole-archive"]) + self.link_shared_object( + objects, + dll_name, + output_dir=extra_dll_dir, + extra_postargs=list(chained_dlls) + [ + '-Wl,--allow-multiple-definition', + '-Wl,--output-def,' + def_path, + '-Wl,--export-all-symbols', + '-Wl,--enable-auto-import', + '-static', + '-mlong-double-64', + ]) + + # No PowerPC! + if is_win64(): + specifier = '/MACHINE:X64' + else: + specifier = '/MACHINE:X86' + + # MSVC specific code + lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] + if not c_compiler.initialized: + c_compiler.initialize() + c_compiler.spawn([c_compiler.lib] + lib_args) + + return lib_path, dll_path + + def can_ccompiler_link(self, compiler): + # MSVC cannot link objects compiled by GNU fortran + return compiler.compiler_type not in ("msvc", ) + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + """ + if self.c_compiler.compiler_type == "msvc": + # Compile a DLL and return the lib for the DLL as + # the object. Also keep track of previous DLLs that + # we have compiled so that we can link against them. + + # If there are .a archives, assume they are self-contained + # static libraries, and build separate DLLs for each + archives = [] + plain_objects = [] + for obj in objects: + if obj.lower().endswith('.a'): + archives.append(obj) + else: + plain_objects.append(obj) + + chained_libs = [] + chained_dlls = [] + for archive in archives[::-1]: + lib, dll = self._link_wrapper_lib( + [archive], + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=True) + chained_libs.insert(0, lib) + chained_dlls.insert(0, dll) + + if not plain_objects: + return chained_libs + + lib, dll = self._link_wrapper_lib( + plain_objects, + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=False) + return [lib] + chained_libs + else: + raise ValueError("Unsupported C compiler") + + +def _can_target(cmd, arch): + """Return true if the architecture supports the -arch flag""" + newcmd = cmd[:] + fid, filename = tempfile.mkstemp(suffix=".f") + os.close(fid) + try: + d = os.path.dirname(filename) + output = os.path.splitext(filename)[0] + ".o" + try: + newcmd.extend(["-arch", arch, "-c", filename]) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) + p.communicate() + return p.returncode == 0 + finally: + if os.path.exists(output): + os.remove(output) + finally: + os.remove(filename) + + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + + print(customized_fcompiler('gnu').get_version()) + try: + print(customized_fcompiler('g95').get_version()) + except Exception as e: + print(e) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/hpux.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/hpux.py new file mode 100644 index 0000000000000000000000000000000000000000..66ad243c3cb5b54d78392eda27a58a332f397f0f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/hpux.py 
@@ -0,0 +1,41 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['HPUXFCompiler'] + +class HPUXFCompiler(FCompiler): + + compiler_type = 'hpux' + description = 'HP Fortran 90 Compiler' + version_pattern = r'HP F90 (?P<version>[^\s*,]*)' + + executables = { + 'version_cmd' : ["f90", "+version"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["ld", "-b"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['+Z'] + def get_flags(self): + return self.pic_flags + ['+ppu', '+DD64'] + def get_flags_opt(self): + return ['-O3'] + def get_libraries(self): + return ['m'] + def get_library_dirs(self): + opt = ['/usr/lib/hpux64'] + return opt + def get_version(self, force=0, ok_status=[256, 0, 1]): + # XXX status==256 may indicate 'unrecognized option' or + # 'no input file'. So, version_cmd needs more work. + return FCompiler.get_version(self, force, ok_status) + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(10) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/ibm.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/ibm.py new file mode 100644 index 0000000000000000000000000000000000000000..58739e45d21d6e9db6b352fee6589066fd10a8bc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/ibm.py @@ -0,0 +1,97 @@ +import os +import re +import sys +import subprocess + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.misc_util import make_temp_file +from distutils import log + +compilers = ['IBMFCompiler'] + +class IBMFCompiler(FCompiler): + compiler_type = 'ibm' + description = 'IBM XL Fortran Compiler' + version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)' + #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 + + executables = { + 'version_cmd' : ["<F90>", "-qversion"], + 'compiler_f77' : ["xlf"], + 'compiler_fix' : ["xlf90", "-qfixed"], + 'compiler_f90' : ["xlf90"], + 'linker_so' : ["xlf95"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_version(self,*args,**kwds): + version = FCompiler.get_version(self,*args,**kwds) + + if version is None and sys.platform.startswith('aix'): + # use lslpp to find out xlf version + lslpp = find_executable('lslpp') + xlf = find_executable('xlf') + if os.path.exists(xlf) and os.path.exists(lslpp): + try: + o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) + except (OSError, subprocess.CalledProcessError): + pass + else: + m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o) + if m: version = m.group('version') + + xlf_dir = '/etc/opt/ibmcmp/xlf' + if version is None and os.path.isdir(xlf_dir): + # linux: + # If the output of xlf does not contain version info + # (that's the case with xlf 8.1, for instance) then + # let's try another method: + l = sorted(os.listdir(xlf_dir)) + l.reverse() + l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] + if l: + from distutils.version import LooseVersion + self.version = version = LooseVersion(l[0]) + return version + + def get_flags(self): + return ['-qextname'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + opt = [] + if sys.platform=='darwin': +
opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') + else: + opt.append('-bshared') + version = self.get_version(ok_status=[0, 40]) + if version is not None: + if sys.platform.startswith('aix'): + xlf_cfg = '/etc/xlf.cfg' + else: + xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version + fo, new_cfg = make_temp_file(suffix='_xlf.cfg') + log.info('Creating '+new_cfg) + with open(xlf_cfg) as fi: + crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) + fo.close() + opt.append('-F'+new_cfg) + return opt + + def get_flags_opt(self): + return ['-O3'] + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/intel.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/intel.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f0d2e2bd6eab4541d8860b2cb8a6262fa63ad2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/intel.py @@ -0,0 +1,211 @@ +# http://developer.intel.com/software/products/compilers/flin/ +import sys + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file + +compilers = ['IntelFCompiler', 'IntelVisualFCompiler', + 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', + 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + + +def intel_version_match(type): + # Match against the important stuff in the version string + return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) + + +class BaseIntelFCompiler(FCompiler): + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['<F90>', '-FI', '-V', '-c', + f + '.f', '-o', f + '.o'] + + def runtime_library_dir_option(self, dir): + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + return '-Wl,-rpath=%s' % dir + + +class IntelFCompiler(BaseIntelFCompiler): + + compiler_type = 'intel' + compiler_aliases = ('ifort',) + description = 'Intel Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + possible_executables = ['ifort', 'ifc'] + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : [None, "-72", "-w90", "-w95"], + 'compiler_f90' : [None], + 'compiler_fix' : [None, "-FI"], + 'linker_so' : ["<F90>", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space!
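+ # clarifying note (not in upstream numpy): with the trailing space, + # FCompiler passes the switch and the module directory as two separate + # arguments ("-module", followed by the directory), the form ifort expects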
+ module_include_switch = '-I' + + def get_flags_free(self): + return ['-FR'] + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): # Scipy test failures with -O2 + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + return ['-fp-model', 'strict', '-O1', + '-assume', 'minus0', '-{}'.format(mpopt)] + + def get_flags_arch(self): + return [] + + def get_flags_linker_so(self): + opt = FCompiler.get_flags_linker_so(self) + v = self.get_version() + if v and v >= '8.0': + opt.append('-nofor_main') + if sys.platform == 'darwin': + # Here, it's -dynamiclib + try: + idx = opt.index('-shared') + opt.remove('-shared') + except ValueError: + idx = 0 + opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] + return opt + + +class IntelItaniumFCompiler(IntelFCompiler): + compiler_type = 'intele' + compiler_aliases = () + description = 'Intel Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium|IA-64') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['<F90>', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + +class IntelEM64TFCompiler(IntelFCompiler): + compiler_type = 'intelem' + compiler_aliases = () + description = 'Intel Fortran Compiler for 64-bit apps' + + version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['<F90>', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + +# Is there no difference in the version string between the above compilers +# and the Visual compilers? + + +class IntelVisualFCompiler(BaseIntelFCompiler): + compiler_type = 'intelv' + description = 'Intel Visual Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['<F90>', '/FI', '/c', + f + '.f', '/o', f + '.o'] + + ar_exe = 'lib.exe' + possible_executables = ['ifort', 'ifl'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None], + 'compiler_fix' : [None], + 'compiler_f90' : [None], + 'linker_so' : [None], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + compile_switch = '/c ' + object_switch = '/Fo' # No space after /Fo! + library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '/module:' # No space after /module: + module_include_switch = '/I' + + def get_flags(self): + opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', + '/assume:underscore', '/fpp'] + return opt + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['/4Yb', '/d2'] + + def get_flags_opt(self): + return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 + + def get_flags_arch(self): + return ["/arch:IA32", "/QaxSSE3"] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +class IntelItaniumVisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelev' + description = 'Intel Visual Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium') + + possible_executables = ['efl'] # XXX this is a wild guess + ar_exe = IntelVisualFCompiler.ar_exe + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['<F90>', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + +class IntelEM64VisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelvem' + description = 'Intel Visual Fortran Compiler for 64-bit apps' + + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + + def get_flags_arch(self): + return [] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='intel').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/lahey.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/lahey.py new file mode 100644 index 0000000000000000000000000000000000000000..d99c25fb1c1bb71847c929e1b46d36f879bc3fc5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/lahey.py @@ -0,0 +1,45 @@ +import os + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['LaheyFCompiler'] + +class LaheyFCompiler(FCompiler): + + compiler_type = 'lahey' + description = 'Lahey/Fujitsu Fortran 95 Compiler' + version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)' + + executables = { + 'version_cmd' : ["<F90>", "--version"], + 'compiler_f77' : ["lf95", "--fix"], + 'compiler_fix' : ["lf95", "--fix"], + 'compiler_f90' : ["lf95"], + 'linker_so' : ["lf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g', '--chk', '--chkglobal'] + def get_library_dirs(self): + opt = [] + d = os.environ.get('LAHEY') + if d: + opt.append(os.path.join(d, 'lib')) + return opt + def get_libraries(self): + opt = [] + opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/mips.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/mips.py new file mode 100644 index 0000000000000000000000000000000000000000..67d2f8908c001d71c2f1aaf03cb84a304880b455 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/mips.py @@ -0,0 +1,54 @@ +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler + +compilers =
['MIPSFCompiler'] + +class MIPSFCompiler(FCompiler): + + compiler_type = 'mips' + description = 'MIPSpro Fortran Compiler' + version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)' + + executables = { + 'version_cmd' : ["<F90>", "-version"], + 'compiler_f77' : ["f77", "-f77"], + 'compiler_fix' : ["f90", "-fixedform"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["f90", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['-KPIC'] + + def get_flags(self): + return self.pic_flags + ['-n32'] + def get_flags_opt(self): + return ['-O3'] + def get_flags_arch(self): + opt = [] + for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): + if getattr(cpu, 'is_IP%s'%a)(): + opt.append('-TARG:platform=IP%s' % a) + break + return opt + def get_flags_arch_f77(self): + r = None + if cpu.is_r10000(): r = 10000 + elif cpu.is_r12000(): r = 12000 + elif cpu.is_r8000(): r = 8000 + elif cpu.is_r5000(): r = 5000 + elif cpu.is_r4000(): r = 4000 + if r is not None: + return ['r%s' % (r)] + return [] + def get_flags_arch_f90(self): + r = self.get_flags_arch_f77() + if r: + r[0] = '-' + r[0] + return r + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='mips').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/nag.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/nag.py new file mode 100644 index 0000000000000000000000000000000000000000..e36b6c40d981754b647ac392064e4a6b79a39933 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/nag.py @@ -0,0 +1,87 @@ +import sys +import re +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NAGFCompiler', 'NAGFORCompiler'] + +class BaseNAGFCompiler(FCompiler): + version_pattern = r'NAG.* Release (?P<version>[^(\s]*)' + + def version_match(self, version_string): + m = re.search(self.version_pattern, version_string) + if m: + return m.group('version') + else: + return None + + def get_flags_linker_so(self): + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + return [] + +class NAGFCompiler(BaseNAGFCompiler): + + compiler_type = 'nag' + description = 'NAGWare Fortran 95 Compiler' + + executables = { + 'version_cmd' : ["<F90>", "-V"], + 'compiler_f77' : ["f95", "-fixed"], + 'compiler_fix' : ["f95", "-fixed"], + 'compiler_f90' : ["f95"], + 'linker_so' : ["<F90>"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_arch(self): + version = self.get_version() + if version and version < '5.1': + return ['-target=native'] + else: + return BaseNAGFCompiler.get_flags_arch(self) + def get_flags_debug(self): + return ['-g', '-gline', '-g90', '-nan', '-C'] + +class NAGFORCompiler(BaseNAGFCompiler): + + compiler_type = 'nagfor' + description = 'NAG Fortran Compiler' + + executables = { + 'version_cmd' : ["nagfor", "-V"], + 'compiler_f77' : ["nagfor", "-fixed"], + 'compiler_fix' : ["nagfor", "-fixed"], + 'compiler_f90' : ["nagfor"], + 'linker_so' : ["nagfor"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedrts', + '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return
BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_debug(self): + version = self.get_version() + if version and version > '6.1': + return ['-g', '-u', '-nan', '-C=all', '-thread_safe', + '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] + else: + return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + compiler = customized_fcompiler(compiler='nagfor') + print(compiler.get_version()) + print(compiler.get_flags_debug()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/none.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/none.py new file mode 100644 index 0000000000000000000000000000000000000000..1279101219b3f259a2a3c7093c1c71e51c3745eb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/none.py @@ -0,0 +1,28 @@ +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils import customized_fcompiler + +compilers = ['NoneFCompiler'] + +class NoneFCompiler(FCompiler): + + compiler_type = 'none' + description = 'Fake Fortran compiler' + + executables = {'compiler_f77': None, + 'compiler_f90': None, + 'compiler_fix': None, + 'linker_so': None, + 'linker_exe': None, + 'archiver': None, + 'ranlib': None, + 'version_cmd': None, + } + + def find_executables(self): + pass + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + print(customized_fcompiler(compiler='none').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/nv.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/nv.py new file mode 100644 index 0000000000000000000000000000000000000000..e15eb7936f788fbce9e9247e4137b13593fc443d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/nv.py @@ -0,0 +1,53 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NVHPCFCompiler'] + +class NVHPCFCompiler(FCompiler): + """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler + + https://developer.nvidia.com/hpc-sdk + + Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, + https://www.pgroup.com/index.htm. + See also `numpy.distutils.fcompiler.pg`. 
+ """ + + compiler_type = 'nv' + description = 'NVIDIA HPC SDK' + version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P[\d.-]+).*' + + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["nvfortran"], + 'compiler_fix': ["nvfortran", "-Mfixed"], + 'compiler_f90': ["nvfortran"], + 'linker_so': [""], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='nv').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py new file mode 100644 index 0000000000000000000000000000000000000000..8efc32a90678b3d54d25f5b6cd82528116fb4d03 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py @@ -0,0 +1,33 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['PathScaleFCompiler'] + +class PathScaleFCompiler(FCompiler): + + compiler_type = 'pathf95' + description = 'PathScale Fortran Compiler' + version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' + + executables = { + 'version_cmd' : ["pathf95", "-version"], + 'compiler_f77' : ["pathf95", "-fixedform"], + 'compiler_fix' : ["pathf95", "-fixedform"], + 'compiler_f90' : ["pathf95"], + 'linker_so' : ["pathf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! 
+ module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/pg.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/pg.py new file mode 100644 index 0000000000000000000000000000000000000000..90686849cc6982b244c9de8a2aeee13fdbb51d40 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/pg.py @@ -0,0 +1,128 @@ +# http://www.pgroup.com +import sys + +from numpy.distutils.fcompiler import FCompiler +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] + + +class PGroupFCompiler(FCompiler): + + compiler_type = 'pg' + description = 'Portland Group Fortran Compiler' + version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' + + if platform == 'darwin': + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran", "-dynamiclib"], + 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], + 'compiler_f90': ["pgfortran", "-dynamiclib"], + 'linker_so': ["libtool"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = [''] + else: + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran"], + 'compiler_fix': ["pgfortran", "-Mfixed"], + 'compiler_f90': ["pgfortran"], + 'linker_so': [""], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + if platform == 'darwin': + def get_flags_linker_so(self): + return ["-dynamic", '-undefined', 'dynamic_lookup'] + + else: + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + + +import functools + +class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['flang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["flang"], + 'compiler_fix': ["flang"], + 'compiler_f90': ["flang"], + 'linker_so': [None], + 'archiver': [ar_exe, "/verbose", "/OUT:"], + 'ranlib': None + } + + library_switch = '/OUT:' # No space after /OUT:! + module_dir_switch = '-module ' # Don't remove ending space! 
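+    # (Same convention as in the PGI class above: the trailing space in
+    # module_dir_switch keeps the directory a separate argument, whereas
+    # library_switch must abut its value, producing "/OUT:name.lib".)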
+ + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + if 'flang' in sys.argv: + print(customized_fcompiler(compiler='flang').get_version()) + else: + print(customized_fcompiler(compiler='pg').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/sun.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/sun.py new file mode 100644 index 0000000000000000000000000000000000000000..621b1cb196ea76622134c75cfa93f6f38775e840 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/sun.py @@ -0,0 +1,51 @@ +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler + +compilers = ['SunFCompiler'] + +class SunFCompiler(FCompiler): + + compiler_type = 'sun' + description = 'Sun or Forte Fortran 95 Compiler' + # ex: + # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 + version_match = simple_version_match( + start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90", "-fixed"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["", "-Bdynamic", "-G"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = '-moddir=' + module_include_switch = '-M' + pic_flags = ['-xcode=pic32'] + + def get_flags_f77(self): + ret = ["-ftrap=%none"] + if (self.get_version() or '') >= '7': + ret.append("-f77") + else: + ret.append("-fixed") + return ret + def get_opt(self): + return ['-fast', '-dalign'] + def get_arch(self): + return ['-xtarget=generic'] + def get_libraries(self): + opt = [] + opt.extend(['fsu', 'sunmath', 'mvec']) + return opt + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='sun').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/fcompiler/vast.py b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/vast.py new file mode 100644 index 0000000000000000000000000000000000000000..b087e7b67ca67074dedca824821641bbb9d4286b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fcompiler/vast.py @@ -0,0 +1,52 @@ +import os + +from numpy.distutils.fcompiler.gnu import GnuFCompiler + +compilers = ['VastFCompiler'] + +class VastFCompiler(GnuFCompiler): + compiler_type = 'vast' + compiler_aliases = () + description = 'Pacific-Sierra Research Fortran 90 Compiler' + version_pattern = (r'\s*Pacific-Sierra Research vf90 ' + r'(Personal|Professional)\s+(?P[^\s]*)') + + # VAST f90 does not support -o with -c. 
So, object files are created + # to the current directory and then moved to build directory + object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' + + executables = { + 'version_cmd' : ["vf90", "-v"], + 'compiler_f77' : ["g77"], + 'compiler_fix' : ["f90", "-Wv,-ya"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def find_executables(self): + pass + + def get_version_cmd(self): + f90 = self.compiler_f90[0] + d, b = os.path.split(f90) + vf90 = os.path.join(d, 'v'+b) + return vf90 + + def get_flags_arch(self): + vast_version = self.get_version() + gnu = GnuFCompiler() + gnu.customize(None) + self.version = gnu.get_version() + opt = GnuFCompiler.get_flags_arch(self) + self.version = vast_version + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='vast').get_version()) diff --git a/phivenv/Lib/site-packages/numpy/distutils/from_template.py b/phivenv/Lib/site-packages/numpy/distutils/from_template.py new file mode 100644 index 0000000000000000000000000000000000000000..4788985924065ca171383be72c913d68e56e279c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/from_template.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +""" + +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '
<p>
' will be replaced with + 'd', 's', 'z', and 'c' for each replicate of the block. + + <_c> is already defined: <_c=s,d,c,z> + <_t> is already defined: <_t=real,double precision,complex,double complex> + + short: + , a short form of the named, useful when no
<p>
appears inside + a block. + + In general, '<..>' contains a comma separated list of arbitrary + expressions. If these expression must contain a comma|leftarrow|rightarrow, + then prepend the comma|leftarrow|rightarrow with a backslash. + + If an expression matches '\\' then it will be replaced + by -th expression. + + Note that all '<..>' forms in a block must have the same number of + comma-separated entries. + + Predefined named template rules: + + + + + + +""" +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) +routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) +function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) + +def parse_structure(astr): + """ Return a list of tuples for each function or subroutine each + tuple is the start and end of a subroutine or function to be + expanded. + """ + + spanlist = [] + ind = 0 + while True: + m = routine_start_re.search(astr, ind) + if m is None: + break + start = m.start() + if function_start_re.match(astr, start, m.end()): + while True: + i = astr.rfind('\n', ind, start) + if i==-1: + break + start = i + if astr[i:i+7]!='\n $': + break + start += 1 + m = routine_end_re.search(astr, m.end()) + ind = end = m and m.end()-1 or len(astr) + spanlist.append((start, end)) + return spanlist + +template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") +named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") +list_re = re.compile(r"<\s*((.*?))\s*>") + +def find_repl_patterns(astr): + reps = named_re.findall(astr) + names = {} + for rep in reps: + name = rep[0].strip() or unique_key(names) + repl = rep[1].replace(r'\,', '@comma@') + thelist = conv(repl) + names[name] = thelist + return names + +def find_and_remove_repl_patterns(astr): + names = find_repl_patterns(astr) + astr = re.subn(named_re, '', astr)[0] + return astr, names + +item_re = re.compile(r"\A\\(?P\d+)\Z") +def conv(astr): + b = astr.split(',') + l = [x.strip() for x in b] + for i in range(len(l)): + m = item_re.match(l[i]) + if m: + j = int(m.group('index')) + l[i] = l[j] + return ','.join(l) + +def unique_key(adict): + """ Obtain a unique key given a dictionary.""" + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = '__l%s' % (n) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') +def expand_sub(substr, names): + substr = substr.replace(r'\>', '@rightarrow@') + substr = substr.replace(r'\<', '@leftarrow@') + lnames = find_repl_patterns(substr) + substr = named_re.sub(r"<\1>", substr) # get rid of definition templates + + def listrepl(mobj): + thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) + if template_name_re.match(thelist): + return "<%s>" % (thelist) + name = None + for key in lnames.keys(): # see if list is already in dictionary + if lnames[key] == thelist: + name = key + if name is None: # this list is not in the dictionary yet + name = unique_key(lnames) + lnames[name] = thelist + return "<%s>" % name + + substr = list_re.sub(listrepl, substr) # convert all lists to named templates + # newnames are constructed as needed + + numsubs = None + base_rule = None + rules = {} + for r in template_re.findall(substr): + if r not in rules: + thelist = lnames.get(r, names.get(r, None)) + if thelist is None: + raise ValueError('No replicates found for <%s>' % (r)) + if r not in names and not thelist.startswith('_'): + names[r] 
= thelist + rule = [i.replace('@comma@', ',') for i in thelist.split(',')] + num = len(rule) + + if numsubs is None: + numsubs = num + rules[r] = rule + base_rule = r + elif num == numsubs: + rules[r] = rule + else: + print("Mismatch in number of replacements (base <%s=%s>)" + " for <%s=%s>. Ignoring." % + (base_rule, ','.join(rules[base_rule]), r, thelist)) + if not rules: + return substr + + def namerepl(mobj): + name = mobj.group(1) + return rules.get(name, (k+1)*[name])[k] + + newstr = '' + for k in range(numsubs): + newstr += template_re.sub(namerepl, substr) + '\n\n' + + newstr = newstr.replace('@rightarrow@', '>') + newstr = newstr.replace('@leftarrow@', '<') + return newstr + +def process_str(allstr): + newstr = allstr + writestr = '' + + struct = parse_structure(newstr) + + oldend = 0 + names = {} + names.update(_special_names) + for sub in struct: + cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) + writestr += cleanedstr + names.update(defs) + writestr += expand_sub(newstr[sub[0]:sub[1]], names) + oldend = sub[1] + writestr += newstr[oldend:] + + return writestr + +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + with open(source) as fid: + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + return process_str(''.join(lines)) + +_special_names = find_repl_patterns(''' +<_c=s,d,c,z> +<_t=real,double precision,complex,double complex> + + + + + +''') + +def main(): + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + writestr = process_str(allstr) + outfile.write(writestr) + + +if __name__ == "__main__": + main() diff --git a/phivenv/Lib/site-packages/numpy/distutils/fujitsuccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/fujitsuccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..47c00c3f66f21deeefdb00723cedcb784ed6e9ee --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/fujitsuccompiler.py @@ -0,0 +1,28 @@ +from distutils.unixccompiler import UnixCCompiler + +class FujitsuCCompiler(UnixCCompiler): + + """ + Fujitsu compiler. 
+ """ + + compiler_type = 'fujitsu' + cc_exe = 'fcc' + cxx_exe = 'FCC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + cc_compiler = self.cc_exe + cxx_compiler = self.cxx_exe + self.set_executables( + compiler=cc_compiler + + ' -O3 -Nclang -fPIC', + compiler_so=cc_compiler + + ' -O3 -Nclang -fPIC', + compiler_cxx=cxx_compiler + + ' -O3 -Nclang -fPIC', + linker_exe=cc_compiler + + ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', + linker_so=cc_compiler + + ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' + ) diff --git a/phivenv/Lib/site-packages/numpy/distutils/intelccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/intelccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..35d92f84389b54b720e97c06044b88944f534d2b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/intelccompiler.py @@ -0,0 +1,111 @@ +import platform + +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.ccompiler import simple_version_match +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler + + +class IntelCCompiler(UnixCCompiler): + """A modified Intel compiler compatible with a GCC-built Python.""" + compiler_type = 'intel' + cc_exe = 'icc' + cc_args = 'fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +class IntelItaniumCCompiler(IntelCCompiler): + compiler_type = 'intele' + + # On Itanium, the Intel Compiler used to be called ecc, let's search for + # it (now it's also icc, so ecc is last in the search). + for cc_exe in map(find_executable, ['icc', 'ecc']): + if cc_exe: + break + + +class IntelEM64TCCompiler(UnixCCompiler): + """ + A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. + """ + compiler_type = 'intelem' + cc_exe = 'icc -m64' + cc_args = '-fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +if platform.system() == 'Windows': + class IntelCCompilerW(MSVCCompiler): + """ + A modified Intel compiler compatible with an MSVC-built Python. 
+ """ + compiler_type = 'intelw' + compiler_cxx = 'icl' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?32,') + self.__version = version_match + + def initialize(self, plat_name=None): + MSVCCompiler.initialize(self, plat_name) + self.cc = self.find_exe('icl.exe') + self.lib = self.find_exe('xilib') + self.linker = self.find_exe('xilink') + self.compile_options = ['/nologo', '/O3', '/MD', '/W3', + '/Qstd=c99'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', + '/Qstd=c99', '/Z7', '/D_DEBUG'] + + class IntelEM64TCCompilerW(IntelCCompilerW): + """ + A modified Intel x86_64 compiler compatible with + a 64bit MSVC-built Python. + """ + compiler_type = 'intelemw' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + self.__version = version_match diff --git a/phivenv/Lib/site-packages/numpy/distutils/lib2def.py b/phivenv/Lib/site-packages/numpy/distutils/lib2def.py new file mode 100644 index 0000000000000000000000000000000000000000..b284ffd57b3e70489f4d08f0549676a540004711 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/lib2def.py @@ -0,0 +1,116 @@ +import re +import sys +import subprocess + +__doc__ = """This module generates a DEF file from the symbols in +an MSVC-compiled DLL import library. It correctly discriminates between +data and functions. The data is collected from the output of the program +nm(1). + +Usage: + python lib2def.py [libname.lib] [output.def] +or + python lib2def.py [libname.lib] > output.def + +libname.lib defaults to python.lib and output.def defaults to stdout + +Author: Robert Kern +Last Update: April 30, 1999 +""" + +__version__ = '0.1a' + +py_ver = "%d%d" % tuple(sys.version_info[:2]) + +DEFAULT_NM = ['nm', '-Cs'] + +DEF_HEADER = """LIBRARY python%s.dll +;CODE PRELOAD MOVEABLE DISCARDABLE +;DATA PRELOAD SINGLE + +EXPORTS +""" % py_ver +# the header of the DEF file + +FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) +DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) + +def parse_cmd(): + """Parses the command-line arguments. + +libfile, deffile = parse_cmd()""" + if len(sys.argv) == 3: + if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': + libfile, deffile = sys.argv[1:] + elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': + deffile, libfile = sys.argv[1:] + else: + print("I'm assuming that your first argument is the library") + print("and the second is the DEF file.") + elif len(sys.argv) == 2: + if sys.argv[1][-4:] == '.def': + deffile = sys.argv[1] + libfile = 'python%s.lib' % py_ver + elif sys.argv[1][-4:] == '.lib': + deffile = None + libfile = sys.argv[1] + else: + libfile = 'python%s.lib' % py_ver + deffile = None + return libfile, deffile + +def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): + """Returns the output of nm_cmd via a pipe. + +nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" + p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True) + nm_output, nm_err = p.communicate() + if p.returncode != 0: + raise RuntimeError('failed to run "%s": "%s"' % ( + ' '.join(nm_cmd), nm_err)) + return nm_output + +def parse_nm(nm_output): + """Returns a tuple of lists: dlist for the list of data +symbols and flist for the list of function symbols. 
+ +dlist, flist = parse_nm(nm_output)""" + data = DATA_RE.findall(nm_output) + func = FUNC_RE.findall(nm_output) + + flist = [] + for sym in data: + if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): + flist.append(sym) + + dlist = [] + for sym in data: + if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): + dlist.append(sym) + + dlist.sort() + flist.sort() + return dlist, flist + +def output_def(dlist, flist, header, file = sys.stdout): + """Outputs the final DEF file to a file defaulting to stdout. + +output_def(dlist, flist, header, file = sys.stdout)""" + for data_sym in dlist: + header = header + '\t%s DATA\n' % data_sym + header = header + '\n' # blank line + for func_sym in flist: + header = header + '\t%s\n' % func_sym + file.write(header) + +if __name__ == '__main__': + libfile, deffile = parse_cmd() + if deffile is None: + deffile = sys.stdout + else: + deffile = open(deffile, 'w') + nm_cmd = DEFAULT_NM + [str(libfile)] + nm_output = getnm(nm_cmd, shell=False) + dlist, flist = parse_nm(nm_output) + output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/phivenv/Lib/site-packages/numpy/distutils/line_endings.py b/phivenv/Lib/site-packages/numpy/distutils/line_endings.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d0dadef6b442456ebbf394d67bfd5c3b3b1950 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/line_endings.py @@ -0,0 +1,77 @@ +""" Functions for converting from DOS to UNIX line endings + +""" +import os +import re +import sys + + +def dos2unix(file): + "Replace CRLF with LF in argument files. Print names of changed files." + if os.path.isdir(file): + print(file, "Directory!") + return + + with open(file, "rb") as fp: + data = fp.read() + if '\0' in data: + print(file, "Binary!") + return + + newdata = re.sub("\r\n", "\n", data) + if newdata != data: + print('dos2unix:', file) + with open(file, "wb") as f: + f.write(newdata) + return file + else: + print(file, 'ok') + +def dos2unix_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + file = dos2unix(full_path) + if file is not None: + modified_files.append(file) + +def dos2unix_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, dos2unix_one_dir, modified_files) + return modified_files +#---------------------------------- + +def unix2dos(file): + "Replace LF with CRLF in argument files. Print names of changed files." 
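+    # Usage sketch (hypothetical file names):
+    #   unix2dos('notes.txt')   # rewrites the file in place if changed
+    #   unix2dos_dir('src')     # walks a tree via os.path.walk, which
+    #                           # only exists on Python 2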
+ if os.path.isdir(file): + print(file, "Directory!") + return + + with open(file, "rb") as fp: + data = fp.read() + if '\0' in data: + print(file, "Binary!") + return + newdata = re.sub("\r\n", "\n", data) + newdata = re.sub("\n", "\r\n", newdata) + if newdata != data: + print('unix2dos:', file) + with open(file, "wb") as f: + f.write(newdata) + return file + else: + print(file, 'ok') + +def unix2dos_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + unix2dos(full_path) + if file is not None: + modified_files.append(file) + +def unix2dos_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, unix2dos_one_dir, modified_files) + return modified_files + +if __name__ == "__main__": + dos2unix_dir(sys.argv[1]) diff --git a/phivenv/Lib/site-packages/numpy/distutils/log.py b/phivenv/Lib/site-packages/numpy/distutils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0af5ab100aa9268a87c94c767a0251538a4660 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/log.py @@ -0,0 +1,111 @@ +# Colored log +import sys +from distutils.log import * # noqa: F403 +from distutils.log import Log as old_Log +from distutils.log import _global_log + +from numpy.distutils.misc_util import (red_text, default_text, cyan_text, + green_text, is_sequence, is_string) + + +def _fix_args(args,flag=1): + if is_string(args): + return args.replace('%', '%%') + if flag and is_sequence(args): + return tuple([_fix_args(a, flag=0) for a in args]) + return args + + +class Log(old_Log): + def _log(self, level, msg, args): + if level >= self.threshold: + if args: + msg = msg % _fix_args(args) + if 0: + if msg.startswith('copying ') and msg.find(' -> ') != -1: + return + if msg.startswith('byte-compiling '): + return + print(_global_color_map[level](msg)) + sys.stdout.flush() + + def good(self, msg, *args): + """ + If we log WARN messages, log this message as a 'nice' anti-warn + message. + + """ + if WARN >= self.threshold: + if args: + print(green_text(msg % _fix_args(args))) + else: + print(green_text(msg)) + sys.stdout.flush() + + +_global_log.__class__ = Log + +good = _global_log.good + +def set_threshold(level, force=False): + prev_level = _global_log.threshold + if prev_level > DEBUG or force: + # If we're running at DEBUG, don't change the threshold, as there's + # likely a good reason why we're running at this level. + _global_log.threshold = level + if level <= DEBUG: + info('set_threshold: setting threshold to DEBUG level,' + ' it can be changed only with force argument') + else: + info('set_threshold: not changing threshold from DEBUG level' + ' %s to %s' % (prev_level, level)) + return prev_level + +def get_threshold(): + return _global_log.threshold + +def set_verbosity(v, force=False): + prev_level = _global_log.threshold + if v < 0: + set_threshold(ERROR, force) + elif v == 0: + set_threshold(WARN, force) + elif v == 1: + set_threshold(INFO, force) + elif v >= 2: + set_threshold(DEBUG, force) + return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) + + +_global_color_map = { + DEBUG:cyan_text, + INFO:default_text, + WARN:red_text, + ERROR:red_text, + FATAL:red_text +} + +# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
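+# For reference, set_verbosity above maps v < 0 -> ERROR, v == 0 -> WARN,
+# v == 1 -> INFO and v >= 2 -> DEBUG, so the call below starts the global
+# logger at the WARN threshold.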
+set_verbosity(0, force=True) + + +_error = error +_warn = warn +_info = info +_debug = debug + + +def error(msg, *a, **kw): + _error(f"ERROR: {msg}", *a, **kw) + + +def warn(msg, *a, **kw): + _warn(f"WARN: {msg}", *a, **kw) + + +def info(msg, *a, **kw): + _info(f"INFO: {msg}", *a, **kw) + + +def debug(msg, *a, **kw): + _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/phivenv/Lib/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/phivenv/Lib/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c new file mode 100644 index 0000000000000000000000000000000000000000..d21fce58b73ad52ff583bdf0bdd153c497063e80 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c @@ -0,0 +1,6 @@ +int _get_output_format(void) +{ + return 0; +} + +int _imp____lc_codepage = 0; diff --git a/phivenv/Lib/site-packages/numpy/distutils/mingw32ccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/mingw32ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..efee4c1b001d54f907009a92ce23204d998c4b4c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/mingw32ccompiler.py @@ -0,0 +1,591 @@ +""" +Support code for building Python extensions on Windows. + + # NT stuff + # 1. Make sure libpython.a exists for gcc. If not, build it. + # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) + # 3. Force windows to use g77 + +""" +import os +import sys +import subprocess +import re +import textwrap + +# Overwrite certain distutils.ccompiler functions: +import numpy.distutils.ccompiler # noqa: F401 +from numpy.distutils import log +# NT stuff +# 1. Make sure libpython.a exists for gcc. If not, build it. +# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) +# --> this is done in numpy/distutils/ccompiler.py +# 3. Force windows to use g77 + +import distutils.cygwinccompiler +from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version +from distutils.errors import UnknownFileError +from numpy.distutils.misc_util import (msvc_runtime_library, + msvc_runtime_version, + msvc_runtime_major, + get_build_architecture) + +def get_msvcr_replacement(): + """Replacement for outdated version of get_msvcr from cygwinccompiler""" + msvcr = msvc_runtime_library() + return [] if msvcr is None else [msvcr] + + +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + +# the same as cygwin plus some additional parameters +class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): + """ A modified MingW32 compiler compatible with an MSVC built Python. + + """ + + compiler_type = 'mingw32' + + def __init__ (self, + verbose=0, + dry_run=0, + force=0): + + distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, + dry_run, force) + + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
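+        # (build_msvcr_library returns False outside Windows or when the
+        # runtime version cannot be determined; see its definition below.)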
+ msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + + # Define the MSVC version as hint for MinGW + msvcr_version = msvc_runtime_version() + if msvcr_version: + self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) + + # MS_WIN64 should be defined when building for amd64 on windows, + # but python headers define it only for MS compilers, which has all + # kind of bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... So we add it here + if get_build_architecture() == 'AMD64': + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' + '-Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') + else: + self.set_executables( + compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail + self.compiler_cxx = ['g++'] + + # Maybe we should also append -mthreads, but then the finished dlls + # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support + # thread-safe exception handling on `Mingw32') + + # no additional libraries needed + #self.dll_libraries=[] + return + + # __init__ () + + def link(self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + export_symbols = None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + # Include the appropriate MSVC runtime library if Python was built + # with MSVC >= 7.0 (MinGW standard is msvcrt) + runtime_library = msvc_runtime_library() + if runtime_library: + if not libraries: + libraries = [] + libraries.append(runtime_library) + args = (self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + None, #export_symbols, we do this in our def-file + debug, + extra_preargs, + extra_postargs, + build_temp, + target_lang) + func = UnixCCompiler.link + func(*args[:func.__code__.co_argcount]) + return + + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + + # added these lines to strip off windows drive letters + # without it, .o files are placed next to .c files + # instead of the build directory + drv, base = os.path.splitdrive(base) + if drv: + base = base[1:] + + if ext not in (self.src_extensions + ['.rc', '.res']): + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext == '.res' or ext == '.rc': + # these need to be compiled to object files + obj_names.append (os.path.join (output_dir, + base + ext + self.obj_extension)) + else: + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) + return obj_names + + # object_filenames () + + +def find_python_dll(): + # We can't do much here: + # - find it in the virtualenv (sys.prefix) + # - find it in python main dir (sys.base_prefix, if in a virtualenv) + # - in system32, + # - 
ortherwise (Sxs), I don't know how to get it. + stems = [sys.prefix] + if sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + + sub_dirs = ['', 'lib', 'bin'] + # generate possible combinations of directory trees and sub-directories + lib_dirs = [] + for stem in stems: + for folder in sub_dirs: + lib_dirs.append(os.path.join(stem, folder)) + + # add system directory as well + if 'SYSTEMROOT' in os.environ: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) + + # search in the file system for possible candidates + major_version, minor_version = tuple(sys.version_info[:2]) + implementation = sys.implementation.name + if implementation == 'cpython': + dllname = f'python{major_version}{minor_version}.dll' + elif implementation == 'pypy': + dllname = f'libpypy{major_version}.{minor_version}-c.dll' + else: + dllname = f'Unknown platform {implementation}' + print("Looking for %s" % dllname) + for folder in lib_dirs: + dll = os.path.join(folder, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.check_output(["objdump.exe", "-p", dll]) + return st.split(b'\n') + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. + + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i].decode()): + break + else: + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j].decode()) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + with open(dfile, 'w') as d: + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + +def find_dll(dll_name): + + arch = {'AMD64' : 'amd64', + 'Intel' : 'x86'}[get_build_architecture()] + + def _find_dll_in_winsxs(dll_name): + # Walk through the WinSxS directory to find the dll. + winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), + 'winsxs') + if not os.path.exists(winsxs_path): + return None + for root, dirs, files in os.walk(winsxs_path): + if dll_name in files and arch in root: + return os.path.join(root, dll_name) + return None + + def _find_dll_in_path(dll_name): + # First, look in the Python directory, then scan PATH for + # the given dll name. + for path in [sys.prefix] + os.environ['PATH'].split(';'): + filepath = os.path.join(path, dll_name) + if os.path.exists(filepath): + return os.path.abspath(filepath) + + return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) + +def build_msvcr_library(debug=False): + if os.name != 'nt': + return False + + # If the version number is None, then we couldn't find the MSVC runtime at + # all, because we are running on a Python distribution which is customed + # compiled; trust that the compiler is the same as the one available to us + # now, and that it is capable of linking with the correct runtime without + # any extra options. 
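+    # (msvc_runtime_major(), imported from misc_util, maps _MSC_VER values
+    # 1300/1310/1400/1500/1600/1900 to 70/71/80/90/100/140; None means the
+    # interpreter was not built with MSVC at all.)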
+ msvcr_ver = msvc_runtime_major() + if msvcr_ver is None: + log.debug('Skip building import library: ' + 'Runtime is not compiled with MSVC') + return False + + # Skip using a custom library for versions < MSVC 8.0 + if msvcr_ver < 80: + log.debug('Skip building msvcr library:' + ' custom functionality not present') + return False + + msvcr_name = msvc_runtime_library() + if debug: + msvcr_name += 'd' + + # Skip if custom library already exists + out_name = "lib%s.a" % msvcr_name + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building msvcr library: "%s" exists' % + (out_file,)) + return True + + # Find the msvcr dll + msvcr_dll_name = msvcr_name + '.dll' + dll_file = find_dll(msvcr_dll_name) + if not dll_file: + log.warn('Cannot build msvcr library: "%s" not found' % + msvcr_dll_name) + return False + + def_name = "lib%s.def" % msvcr_name + def_file = os.path.join(sys.prefix, 'libs', def_name) + + log.info('Building msvcr library: "%s" (from %s)' \ + % (out_file, dll_file)) + + # Generate a symbol definition file from the msvcr dll + generate_def(dll_file, def_file) + + # Create a custom mingw library for the given symbol definitions + cmd = ['dlltool', '-d', def_file, '-l', out_file] + retcode = subprocess.call(cmd) + + # Clean up symbol definitions + os.remove(def_file) + + return (not retcode) + +def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _check_for_import_lib(): + """Check if an import library for the Python runtime already exists.""" + major_version, minor_version = tuple(sys.version_info[:2]) + + # patterns for the file name of the library itself + patterns = ['libpython%d%d.a', + 'libpython%d%d.dll.a', + 'libpython%d.%d.dll.a'] + + # directory trees that may contain the library + stems = [sys.prefix] + if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: + stems.append(sys.real_prefix) + + # possible subdirectories within those trees where it is placed + sub_dirs = ['libs', 'lib'] + + # generate a list of candidate locations + candidates = [] + for pat in patterns: + filename = pat % (major_version, minor_version) + for stem_dir in stems: + for folder in sub_dirs: + candidates.append(os.path.join(stem_dir, folder, filename)) + + # test the filesystem to see if we can find any of these + for fullname in candidates: + if os.path.isfile(fullname): + # already exists, in location given + return (True, fullname) + + # needs to be built, preferred location given first + return (False, candidates[0]) + +def _build_import_library_amd64(): + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + # get the runtime dll for which we are building import library + dll_file = find_python_dll() + log.info('Building import library (arch=AMD64): "%s" (from %s)' % + (out_file, dll_file)) + + # generate symbol list from this library + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + generate_def(dll_file, def_file) + + # generate import library from this symbol list + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.check_call(cmd) 
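+# For reference, the .def file generated above and handed to dlltool has the
+# shape written by generate_def (a sketch assuming CPython 3.9; the symbol
+# list is abridged):
+#
+#   LIBRARY python39.dll
+#   ;CODE PRELOAD MOVEABLE DISCARDABLE
+#   ;DATA PRELOAD SINGLE
+#
+#   EXPORTS
+#   Py_Initialize
+#   PyList_New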
+ +def _build_import_library_x86(): + """ Build the import libraries for Mingw32-gcc on Windows + """ + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) + lib_file = os.path.join(sys.prefix, 'libs', lib_name) + if not os.path.isfile(lib_file): + # didn't find library file in virtualenv, try base distribution, too, + # and use that instead if found there. for Python 2.7 venvs, the base + # directory is in attribute real_prefix instead of base_prefix. + if hasattr(sys, 'base_prefix'): + base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) + elif hasattr(sys, 'real_prefix'): + base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) + else: + base_lib = '' # os.path.isfile('') == False + + if os.path.isfile(base_lib): + lib_file = base_lib + else: + log.warn('Cannot build import library: "%s" not found', lib_file) + return + log.info('Building import library (ARCH=x86): "%s"', out_file) + + from numpy.distutils import lib2def + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + nm_output = lib2def.getnm( + lib2def.DEFAULT_NM + [lib_file], shell=False) + dlist, flist = lib2def.parse_nm(nm_output) + with open(def_file, 'w') as fid: + lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) + + dll_name = find_python_dll () + + cmd = ["dlltool", + "--dllname", dll_name, + "--def", def_file, + "--output-lib", out_file] + status = subprocess.check_output(cmd) + if status: + log.warn('Failed to build import library for gcc. Linking will fail.') + return + +#===================================== +# Dealing with Visual Studio MANIFESTS +#===================================== + +# Functions to deal with visual studio manifests. Manifest are a mechanism to +# enforce strong DLL versioning on windows, and has nothing to do with +# distutils MANIFEST. manifests are XML files with version info, and used by +# the OS loader; they are necessary when linking against a DLL not in the +# system path; in particular, official python 2.6 binary is built against the +# MS runtime 9 (the one from VS 2008), which is not available on most windows +# systems; python 2.6 installer does install it in the Win SxS (Side by side) +# directory, but this requires the manifest for this to work. This is a big +# mess, thanks MS for a wonderful system. + +# XXX: ideally, we should use exactly the same version as used by python. I +# submitted a patch to get this version, but it was only included for python +# 2.6.1 and above. So for versions below, we use a "best guess". +_MSVCRVER_TO_FULLVER = {} +if sys.platform == 'win32': + try: + import msvcrt + # I took one version in my SxS directory: no idea if it is the good + # one, and we can't retrieve it from python + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" + _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 + # on Windows XP: + _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" + crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) + if crt_ver is not None: # Available at least back to Python 3.3 + maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() + _MSVCRVER_TO_FULLVER[maj + min] = crt_ver + del maj, min + del crt_ver + except ImportError: + # If we are here, means python was not built with MSVC. 
Not sure what + # to do in that case: manifest building will fail, but it should not be + # used in that case anyway + log.warn('Cannot import msvcrt: using manifest will not be possible') + +def msvc_manifest_xml(maj, min): + """Given a major and minor version of the MSVCR, returns the + corresponding XML file.""" + try: + fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] + except KeyError: + raise ValueError("Version %d,%d of MSVCRT not supported yet" % + (maj, min)) from None + # Don't be fooled, it looks like an XML, but it is not. In particular, it + # should not have any space before starting, and its size should be + # divisible by 4, most likely for alignment constraints when the xml is + # embedded in the binary... + # This template was copied directly from the python 2.6 binary (using + # strings.exe from mingw on python.exe). + template = textwrap.dedent("""\ + + + + + + + + + + + + + + """) + + return template % {'fullver': fullver, 'maj': maj, 'min': min} + +def manifest_rc(name, type='dll'): + """Return the rc file used to generate the res file which will be embedded + as manifest for given manifest file name, of given type ('dll' or + 'exe'). + + Parameters + ---------- + name : str + name of the manifest file to embed + type : str {'dll', 'exe'} + type of the binary which will embed the manifest + + """ + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check msvcr major version are the same for linking and + # embedding + maj = msvc_runtime_major() + if maj: + if not maj == int(msver): + raise ValueError( + "Discrepancy between linked msvcr " \ + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj)) + +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) + return os.path.splitext(base)[0] + +def manifest_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + exext = config.compiler.exe_extension + return root + exext + ".manifest" + +def rc_name(config): + # Get configtest name (including suffix) + root = configtest_name(config) + return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma_str, mi_str = str(msver).split('.') + # Write the manifest file + manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) + with open(manifest_name(config), "w") as man: + config.temp_files.append(manifest_name(config)) + man.write(manxml) diff --git a/phivenv/Lib/site-packages/numpy/distutils/misc_util.py b/phivenv/Lib/site-packages/numpy/distutils/misc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..6a4dd6f07e9d01964e2b20e641c12dbdd59ca149 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/misc_util.py @@ -0,0 +1,2493 @@ +import os +import re +import sys +import copy +import glob +import atexit +import tempfile +import subprocess +import shutil +import multiprocessing +import textwrap +import importlib.util +from threading import local as tlocal +from functools import reduce + +import distutils +from distutils.errors import DistutilsError + +# stores temporary directory of each thread to only create one per thread +_tdata = tlocal() + +# store all created temporary 
directories so they can be deleted on exit +_tmpdirs = [] +def clean_up_temporary_directory(): + if _tmpdirs is not None: + for d in _tmpdirs: + try: + shutil.rmtree(d) + except OSError: + pass + +atexit.register(clean_up_temporary_directory) + +__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', + 'dict_append', 'appendpath', 'generate_config_py', + 'get_cmd', 'allpath', 'get_mathlibs', + 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', + 'has_f_sources', 'has_cxx_sources', 'filter_sources', + 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', + 'get_script_files', 'get_lib_source_files', 'get_data_files', + 'dot_join', 'get_frame', 'minrelpath', 'njoin', + 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', + 'get_build_architecture', 'get_info', 'get_pkg_info', + 'get_num_build_jobs', 'sanitize_cxx_flags', + 'exec_mod_from_location'] + +class InstallableLib: + """ + Container to hold information on an installable library. + + Parameters + ---------- + name : str + Name of the installed library. + build_info : dict + Dictionary holding build information. + target_dir : str + Absolute path specifying where to install the library. + + See Also + -------- + Configuration.add_installed_library + + Notes + ----- + The three parameters are stored as attributes with the same names. + + """ + def __init__(self, name, build_info, target_dir): + self.name = name + self.build_info = build_info + self.target_dir = target_dir + + +def get_num_build_jobs(): + """ + Get number of parallel build jobs set by the --parallel command line + argument of setup.py + If the command did not receive a setting the environment variable + NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of + processors on the system, with a maximum of 8 (to prevent + overloading the system if there a lot of CPUs). + + Returns + ------- + out : int + number of parallel jobs that can be run + + """ + from numpy.distutils.core import get_distribution + try: + cpu_count = len(os.sched_getaffinity(0)) + except AttributeError: + cpu_count = multiprocessing.cpu_count() + cpu_count = min(cpu_count, 8) + envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) + dist = get_distribution() + # may be None during configuration + if dist is None: + return envjobs + + # any of these three may have the job set, take the largest + cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), + getattr(dist.get_command_obj('build_ext'), 'parallel', None), + getattr(dist.get_command_obj('build_clib'), 'parallel', None)) + if all(x is None for x in cmdattr): + return envjobs + else: + return max(x for x in cmdattr if x is not None) + +def quote_args(args): + """Quote list of arguments. + + .. deprecated:: 1.22. + """ + import warnings + warnings.warn('"quote_args" is deprecated.', + DeprecationWarning, stacklevel=2) + # don't used _nt_quote_args as it does not check if + # args items already have quotes or not. + args = list(args) + for i in range(len(args)): + a = args[i] + if ' ' in a and a[0] not in '"\'': + args[i] = '"%s"' % (a) + return args + +def allpath(name): + "Convert a /-separated pathname to one using the OS's path separator." 
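+    # e.g. allpath('a/b/c') -> r'a\b\c' on Windows, 'a/b/c' on POSIX.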
+ split = name.split('/') + return os.path.join(*split) + +def rel_path(path, parent_path): + """Return path relative to parent_path.""" + # Use realpath to avoid issues with symlinked dirs (see gh-7707) + pd = os.path.realpath(os.path.abspath(parent_path)) + apath = os.path.realpath(os.path.abspath(path)) + if len(apath) < len(pd): + return path + if apath == pd: + return '' + if pd == apath[:len(pd)]: + assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) + path = apath[len(pd)+1:] + return path + +def get_path_from_frame(frame, parent_path=None): + """Return path of the module given a frame object from the call stack. + + Returned path is relative to parent_path when given, + otherwise it is absolute path. + """ + + # First, try to find if the file name is in the frame. + try: + caller_file = eval('__file__', frame.f_globals, frame.f_locals) + d = os.path.dirname(os.path.abspath(caller_file)) + except NameError: + # __file__ is not defined, so let's try __name__. We try this second + # because setuptools spoofs __name__ to be '__main__' even though + # sys.modules['__main__'] might be something else, like easy_install(1). + caller_name = eval('__name__', frame.f_globals, frame.f_locals) + __import__(caller_name) + mod = sys.modules[caller_name] + if hasattr(mod, '__file__'): + d = os.path.dirname(os.path.abspath(mod.__file__)) + else: + # we're probably running setup.py as execfile("setup.py") + # (likely we're building an egg) + d = os.path.abspath('.') + + if parent_path is not None: + d = rel_path(d, parent_path) + + return d or '.' + +def njoin(*path): + """Join two or more pathname components + + - convert a /-separated pathname to one using the OS's path separator. + - resolve `..` and `.` from path. + + Either passing n arguments as in njoin('a','b'), or a sequence + of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. + """ + paths = [] + for p in path: + if is_sequence(p): + # njoin(['a', 'b'], 'c') + paths.append(njoin(*p)) + else: + assert is_string(p) + paths.append(p) + path = paths + if not path: + # njoin() + joined = '' + else: + # njoin('a', 'b') + joined = os.path.join(*path) + if os.path.sep != '/': + joined = joined.replace('/', os.path.sep) + return minrelpath(joined) + +def get_mathlibs(path=None): + """Return the MATHLIB line from numpyconfig.h + """ + if path is not None: + config_file = os.path.join(path, '_numpyconfig.h') + else: + # Look for the file in each of the numpy include directories. + dirs = get_numpy_include_dirs() + for path in dirs: + fn = os.path.join(path, '_numpyconfig.h') + if os.path.exists(fn): + config_file = fn + break + else: + raise DistutilsError('_numpyconfig.h not found in numpy include ' + 'dirs %r' % (dirs,)) + + with open(config_file) as fid: + mathlibs = [] + s = '#define MATHLIB' + for line in fid: + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + return mathlibs + +def minrelpath(path): + """Resolve `..` and '.' from path. + """ + if not is_string(path): + return path + if '.' 
not in path: + return path + l = path.split(os.sep) + while l: + try: + i = l.index('.', 1) + except ValueError: + break + del l[i] + j = 1 + while l: + try: + i = l.index('..', j) + except ValueError: + break + if l[i-1]=='..': + j += 1 + else: + del l[i], l[i-1] + j = 1 + if not l: + return '' + return os.sep.join(l) + +def sorted_glob(fileglob): + """sorts output of python glob for https://bugs.python.org/issue30461 + to allow extensions to have reproducible build results""" + return sorted(glob.glob(fileglob)) + +def _fix_paths(paths, local_path, include_non_existing): + assert is_sequence(paths), repr(type(paths)) + new_paths = [] + assert not is_string(paths), repr(paths) + for n in paths: + if is_string(n): + if '*' in n or '?' in n: + p = sorted_glob(n) + p2 = sorted_glob(njoin(local_path, n)) + if p2: + new_paths.extend(p2) + elif p: + new_paths.extend(p) + else: + if include_non_existing: + new_paths.append(n) + print('could not resolve pattern in %r: %r' % + (local_path, n)) + else: + n2 = njoin(local_path, n) + if os.path.exists(n2): + new_paths.append(n2) + else: + if os.path.exists(n): + new_paths.append(n) + elif include_non_existing: + new_paths.append(n) + if not os.path.exists(n): + print('non-existing path in %r: %r' % + (local_path, n)) + + elif is_sequence(n): + new_paths.extend(_fix_paths(n, local_path, include_non_existing)) + else: + new_paths.append(n) + return [minrelpath(p) for p in new_paths] + +def gpaths(paths, local_path='', include_non_existing=True): + """Apply glob to paths and prepend local_path if needed. + """ + if is_string(paths): + paths = (paths,) + return _fix_paths(paths, local_path, include_non_existing) + +def make_temp_file(suffix='', prefix='', text=True): + if not hasattr(_tdata, 'tempdir'): + _tdata.tempdir = tempfile.mkdtemp() + _tmpdirs.append(_tdata.tempdir) + fid, name = tempfile.mkstemp(suffix=suffix, + prefix=prefix, + dir=_tdata.tempdir, + text=text) + fo = os.fdopen(fid, 'w') + return fo, name + +# Hooks for colored terminal output. 
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle +def terminal_has_colors(): + if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: + # Avoid importing curses that causes illegal operation + # with a message: + # PYTHON2 caused an invalid page fault in + # module CYGNURSES7.DLL as 015f:18bbfc28 + # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] + # ssh to Win32 machine from debian + # curses.version is 2.2 + # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) + return 0 + if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): + try: + import curses + curses.setupterm() + if (curses.tigetnum("colors") >= 0 + and curses.tigetnum("pairs") >= 0 + and ((curses.tigetstr("setf") is not None + and curses.tigetstr("setb") is not None) + or (curses.tigetstr("setaf") is not None + and curses.tigetstr("setab") is not None) + or curses.tigetstr("scp") is not None)): + return 1 + except Exception: + pass + return 0 + +if terminal_has_colors(): + _colour_codes = dict(black=0, red=1, green=2, yellow=3, + blue=4, magenta=5, cyan=6, white=7, default=9) + def colour_text(s, fg=None, bg=None, bold=False): + seq = [] + if bold: + seq.append('1') + if fg: + fgcode = 30 + _colour_codes.get(fg.lower(), 0) + seq.append(str(fgcode)) + if bg: + bgcode = 40 + _colour_codes.get(bg.lower(), 7) + seq.append(str(bgcode)) + if seq: + return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) + else: + return s +else: + def colour_text(s, fg=None, bg=None): + return s + +def default_text(s): + return colour_text(s, 'default') +def red_text(s): + return colour_text(s, 'red') +def green_text(s): + return colour_text(s, 'green') +def yellow_text(s): + return colour_text(s, 'yellow') +def cyan_text(s): + return colour_text(s, 'cyan') +def blue_text(s): + return colour_text(s, 'blue') + +######################### + +def cyg2win32(path: str) -> str: + """Convert a path from Cygwin-native to Windows-native. + + Uses the cygpath utility (part of the Base install) to do the + actual conversion. Falls back to returning the original path if + this fails. + + Handles the default ``/cygdrive`` mount prefix as well as the + ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such + as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or + ``/home/username`` + + Parameters + ---------- + path : str + The path to convert + + Returns + ------- + converted_path : str + The converted path + + Notes + ----- + Documentation for cygpath utility: + https://cygwin.com/cygwin-ug-net/cygpath.html + Documentation for the C function it wraps: + https://cygwin.com/cygwin-api/func-cygwin-conv-path.html + + """ + if sys.platform != "cygwin": + return path + return subprocess.check_output( + ["/usr/bin/cygpath", "--windows", path], text=True + ) + + +def mingw32(): + """Return true when using mingw32 environment. 
+ """ + if sys.platform=='win32': + if os.environ.get('OSTYPE', '')=='msys': + return True + if os.environ.get('MSYSTEM', '')=='MINGW32': + return True + return False + +def msvc_runtime_version(): + "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" + msc_pos = sys.version.find('MSC v.') + if msc_pos != -1: + msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) + else: + msc_ver = None + return msc_ver + +def msvc_runtime_library(): + "Return name of MSVC runtime library if Python was built with MSVC >= 7" + ver = msvc_runtime_major () + if ver: + if ver < 140: + return "msvcr%i" % ver + else: + return "vcruntime%i" % ver + else: + return None + +def msvc_runtime_major(): + "Return major version of MSVC runtime coded like get_build_msvc_version" + major = {1300: 70, # MSVC 7.0 + 1310: 71, # MSVC 7.1 + 1400: 80, # MSVC 8 + 1500: 90, # MSVC 9 (aka 2008) + 1600: 100, # MSVC 10 (aka 2010) + 1900: 140, # MSVC 14 (aka 2015) + }.get(msvc_runtime_version(), None) + return major + +######################### + +#XXX need support for .C that is also C++ +cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match +f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match +f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match +def _get_f90_modules(source): + """Return a list of Fortran f90 module names that + given source file defines. + """ + if not f90_ext_match(source): + return [] + modules = [] + with open(source) as f: + for line in f: + m = f90_module_name_match(line) + if m: + name = m.group('name') + modules.append(name) + # break # XXX can we assume that there is one module per file? + return modules + +def is_string(s): + return isinstance(s, str) + +def all_strings(lst): + """Return True if all items in lst are string objects. """ + for item in lst: + if not is_string(item): + return False + return True + +def is_sequence(seq): + if is_string(seq): + return False + try: + len(seq) + except Exception: + return False + return True + +def is_glob_pattern(s): + return is_string(s) and ('*' in s or '?' in s) + +def as_list(seq): + if is_sequence(seq): + return list(seq) + else: + return [seq] + +def get_language(sources): + # not used in numpy/scipy packages, use build_ext.detect_language instead + """Determine language value (c,f77,f90) from sources """ + language = None + for source in sources: + if isinstance(source, str): + if f90_ext_match(source): + language = 'f90' + break + elif fortran_ext_match(source): + language = 'f77' + return language + +def has_f_sources(sources): + """Return True if sources contains Fortran files """ + for source in sources: + if fortran_ext_match(source): + return True + return False + +def has_cxx_sources(sources): + """Return True if sources contains C++ files """ + for source in sources: + if cxx_ext_match(source): + return True + return False + +def filter_sources(sources): + """Return four lists of filenames containing + C, C++, Fortran, and Fortran 90 module sources, + respectively. 
+ """ + c_sources = [] + cxx_sources = [] + f_sources = [] + fmodule_sources = [] + for source in sources: + if fortran_ext_match(source): + modules = _get_f90_modules(source) + if modules: + fmodule_sources.append(source) + else: + f_sources.append(source) + elif cxx_ext_match(source): + cxx_sources.append(source) + else: + c_sources.append(source) + return c_sources, cxx_sources, f_sources, fmodule_sources + + +def _get_headers(directory_list): + # get *.h files from list of directories + headers = [] + for d in directory_list: + head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? + headers.extend(head) + return headers + +def _get_directories(list_of_sources): + # get unique directories from list of sources. + direcs = [] + for f in list_of_sources: + d = os.path.split(f) + if d[0] != '' and not d[0] in direcs: + direcs.append(d[0]) + return direcs + +def _commandline_dep_string(cc_args, extra_postargs, pp_opts): + """ + Return commandline representation used to determine if a file needs + to be recompiled + """ + cmdline = 'commandline: ' + cmdline += ' '.join(cc_args) + cmdline += ' '.join(extra_postargs) + cmdline += ' '.join(pp_opts) + '\n' + return cmdline + + +def get_dependencies(sources): + #XXX scan sources for include statements + return _get_headers(_get_directories(sources)) + +def is_local_src_dir(directory): + """Return true if directory is local directory. + """ + if not is_string(directory): + return False + abs_dir = os.path.abspath(directory) + c = os.path.commonprefix([os.getcwd(), abs_dir]) + new_dir = abs_dir[len(c):].split(os.sep) + if new_dir and not new_dir[0]: + new_dir = new_dir[1:] + if new_dir and new_dir[0]=='build': + return False + new_dir = os.sep.join(new_dir) + return os.path.isdir(new_dir) + +def general_source_files(top_path): + pruned_directories = {'CVS':1, '.svn':1, 'build':1} + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for f in filenames: + if not prune_file_pat.search(f): + yield os.path.join(dirpath, f) + +def general_source_directories_files(top_path): + """Return a directory name relative to top_path and + files contained. + """ + pruned_directories = ['CVS', '.svn', 'build'] + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for d in dirnames: + dpath = os.path.join(dirpath, d) + rpath = rel_path(dpath, top_path) + files = [] + for f in os.listdir(dpath): + fn = os.path.join(dpath, f) + if os.path.isfile(fn) and not prune_file_pat.search(fn): + files.append(fn) + yield rpath, files + dpath = top_path + rpath = rel_path(dpath, top_path) + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ + if not prune_file_pat.search(f)] + files = [f for f in filenames if os.path.isfile(f)] + yield rpath, files + + +def get_ext_source_files(ext): + # Get sources and any include files in the same directory. 
+ filenames = [] + sources = [_m for _m in ext.sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + for d in ext.depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_script_files(scripts): + scripts = [_m for _m in scripts if is_string(_m)] + return scripts + +def get_lib_source_files(lib): + filenames = [] + sources = lib[1].get('sources', []) + sources = [_m for _m in sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + depends = lib[1].get('depends', []) + for d in depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_shared_lib_extension(is_python_ext=False): + """Return the correct file extension for shared libraries. + + Parameters + ---------- + is_python_ext : bool, optional + Whether the shared library is a Python extension. Default is False. + + Returns + ------- + so_ext : str + The shared library extension. + + Notes + ----- + For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, + and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on + POSIX systems according to PEP 3149. + + """ + confvars = distutils.sysconfig.get_config_vars() + so_ext = confvars.get('EXT_SUFFIX', '') + + if not is_python_ext: + # hardcode known values, config vars (including SHLIB_SUFFIX) are + # unreliable (see #3182) + # darwin, windows and debug linux are wrong in 3.3.1 and older + if (sys.platform.startswith('linux') or + sys.platform.startswith('gnukfreebsd')): + so_ext = '.so' + elif sys.platform.startswith('darwin'): + so_ext = '.dylib' + elif sys.platform.startswith('win'): + so_ext = '.dll' + else: + # fall back to config vars for unknown platforms + # fix long extension for Python >=3.2, see PEP 3149. + if 'SOABI' in confvars: + # Does nothing unless SOABI config var exists + so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) + + return so_ext + +def get_data_files(data): + if is_string(data): + return [data] + sources = data[1] + filenames = [] + for s in sources: + if hasattr(s, '__call__'): + continue + if is_local_src_dir(s): + filenames.extend(list(general_source_files(s))) + elif is_string(s): + if os.path.isfile(s): + filenames.append(s) + else: + print('Not existing data file:', s) + else: + raise TypeError(repr(s)) + return filenames + +def dot_join(*args): + return '.'.join([a for a in args if a]) + +def get_frame(level=0): + """Return frame object from call stack with given level. + """ + try: + return sys._getframe(level+1) + except AttributeError: + frame = sys.exc_info()[2].tb_frame + for _ in range(level+1): + frame = frame.f_back + return frame + + +###################### + +class Configuration: + + _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', + 'libraries', 'headers', 'scripts', 'py_modules', + 'installed_libraries', 'define_macros'] + _dict_keys = ['package_dir', 'installed_pkg_config'] + _extra_keys = ['name', 'version'] + + numpy_include_dirs = [] + + def __init__(self, + package_name=None, + parent_name=None, + top_path=None, + package_path=None, + caller_level=1, + setup_name='setup.py', + **attrs): + """Construct configuration instance of a package. 
+
+        package_name -- name of the package
+                        Ex.: 'distutils'
+        parent_name  -- name of the parent package
+                        Ex.: 'numpy'
+        top_path     -- directory of the toplevel package
+                        Ex.: the directory where the numpy package source sits
+        package_path -- directory of package. Will be computed by magic from the
+                        directory of the caller module if not specified
+                        Ex.: the directory where numpy.distutils is
+        caller_level -- frame level to caller namespace, internal parameter.
+        """
+        self.name = dot_join(parent_name, package_name)
+        self.version = None
+
+        caller_frame = get_frame(caller_level)
+        self.local_path = get_path_from_frame(caller_frame, top_path)
+        # local_path -- directory of a file (usually setup.py) that
+        #               defines a configuration() function.
+        if top_path is None:
+            top_path = self.local_path
+            self.local_path = ''
+        if package_path is None:
+            package_path = self.local_path
+        elif os.path.isdir(njoin(self.local_path, package_path)):
+            package_path = njoin(self.local_path, package_path)
+        if not os.path.isdir(package_path or '.'):
+            raise ValueError("%r is not a directory" % (package_path,))
+        self.top_path = top_path
+        self.package_path = package_path
+        # this is the relative path in the installed package
+        self.path_in_package = os.path.join(*self.name.split('.'))
+
+        self.list_keys = self._list_keys[:]
+        self.dict_keys = self._dict_keys[:]
+
+        for n in self.list_keys:
+            v = copy.copy(attrs.get(n, []))
+            setattr(self, n, as_list(v))
+
+        for n in self.dict_keys:
+            v = copy.copy(attrs.get(n, {}))
+            setattr(self, n, v)
+
+        known_keys = self.list_keys + self.dict_keys
+        self.extra_keys = self._extra_keys[:]
+        for n in attrs.keys():
+            if n in known_keys:
+                continue
+            a = attrs[n]
+            setattr(self, n, a)
+            if isinstance(a, list):
+                self.list_keys.append(n)
+            elif isinstance(a, dict):
+                self.dict_keys.append(n)
+            else:
+                self.extra_keys.append(n)
+
+        if os.path.exists(njoin(package_path, '__init__.py')):
+            self.packages.append(self.name)
+            self.package_dir[self.name] = package_path
+
+        self.options = dict(
+            ignore_setup_xxx_py = False,
+            assume_default_configuration = False,
+            delegate_options_to_subpackages = False,
+            quiet = False,
+        )
+
+        caller_instance = None
+        for i in range(1, 3):
+            try:
+                f = get_frame(i)
+            except ValueError:
+                break
+            try:
+                caller_instance = eval('self', f.f_globals, f.f_locals)
+                break
+            except NameError:
+                pass
+        if isinstance(caller_instance, self.__class__):
+            if caller_instance.options['delegate_options_to_subpackages']:
+                self.set_options(**caller_instance.options)
+
+        self.setup_name = setup_name
+
+    def todict(self):
+        """
+        Return a dictionary compatible with the keyword arguments of distutils
+        setup function.
+
+        Examples
+        --------
+        >>> setup(**config.todict()) #doctest: +SKIP
+        """
+
+        self._optimize_data_files()
+        d = {}
+        known_keys = self.list_keys + self.dict_keys + self.extra_keys
+        for n in known_keys:
+            a = getattr(self, n)
+            if a:
+                d[n] = a
+        return d
+
+    def info(self, message):
+        if not self.options['quiet']:
+            print(message)
+
+    def warn(self, message):
+        sys.stderr.write('Warning: %s\n' % (message,))
+
+    def set_options(self, **options):
+        """
+        Configure Configuration instance.
+ + The following options are available: + - ignore_setup_xxx_py + - assume_default_configuration + - delegate_options_to_subpackages + - quiet + + """ + for key, value in options.items(): + if key in self.options: + self.options[key] = value + else: + raise ValueError('Unknown option: '+key) + + def get_distribution(self): + """Return the distutils distribution object for self.""" + from numpy.distutils.core import get_distribution + return get_distribution() + + def _wildcard_get_subpackage(self, subpackage_name, + parent_name, + caller_level = 1): + l = subpackage_name.split('.') + subpackage_path = njoin([self.local_path]+l) + dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] + config_list = [] + for d in dirs: + if not os.path.isfile(njoin(d, '__init__.py')): + continue + if 'build' in d.split(os.sep): + continue + n = '.'.join(d.split(os.sep)[-len(l):]) + c = self.get_subpackage(n, + parent_name = parent_name, + caller_level = caller_level+1) + config_list.extend(c) + return config_list + + def _get_configuration_from_setup_py(self, setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = 1): + # In case setup_py imports local modules: + sys.path.insert(0, os.path.dirname(setup_py)) + try: + setup_name = os.path.splitext(os.path.basename(setup_py))[0] + n = dot_join(self.name, subpackage_name, setup_name) + setup_module = exec_mod_from_location( + '_'.join(n.split('.')), setup_py) + if not hasattr(setup_module, 'configuration'): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s does not define configuration())'\ + % (setup_module)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level + 1) + else: + pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) + args = (pn,) + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + config = setup_module.configuration(*args) + if config.name!=dot_join(parent_name, subpackage_name): + self.warn('Subpackage %r configuration returned as %r' % \ + (dot_join(parent_name, subpackage_name), config.name)) + finally: + del sys.path[0] + return config + + def get_subpackage(self,subpackage_name, + subpackage_path=None, + parent_name=None, + caller_level = 1): + """Return list of subpackage configurations. + + Parameters + ---------- + subpackage_name : str or None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path : str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. + parent_name : str + Parent name. 
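+
+        Examples
+        --------
+        An illustrative sketch from a parent ``setup.py`` (``'mysub'`` is a
+        hypothetical subpackage name):
+
+        >>> config_list = config.get_subpackage('mysub') #doctest: +SKIP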
+ """ + if subpackage_name is None: + if subpackage_path is None: + raise ValueError( + "either subpackage_name or subpackage_path must be specified") + subpackage_name = os.path.basename(subpackage_path) + + # handle wildcards + l = subpackage_name.split('.') + if subpackage_path is None and '*' in subpackage_name: + return self._wildcard_get_subpackage(subpackage_name, + parent_name, + caller_level = caller_level+1) + assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) + if subpackage_path is None: + subpackage_path = njoin([self.local_path] + l) + else: + subpackage_path = njoin([subpackage_path] + l[:-1]) + subpackage_path = self.paths([subpackage_path])[0] + setup_py = njoin(subpackage_path, self.setup_name) + if not self.options['ignore_setup_xxx_py']: + if not os.path.isfile(setup_py): + setup_py = njoin(subpackage_path, + 'setup_%s.py' % (subpackage_name)) + if not os.path.isfile(setup_py): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s/{setup_%s,setup}.py was not found)' \ + % (os.path.dirname(setup_py), subpackage_name)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level+1) + else: + config = self._get_configuration_from_setup_py( + setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = caller_level + 1) + if config: + return [config] + else: + return [] + + def add_subpackage(self,subpackage_name, + subpackage_path=None, + standalone = False): + """Add a sub-package to the current Configuration instance. + + This is useful in a setup.py script for adding sub-packages to a + package. + + Parameters + ---------- + subpackage_name : str + name of the subpackage + subpackage_path : str + if given, the subpackage path such as the subpackage is in + subpackage_path / subpackage_name. If None,the subpackage is + assumed to be located in the local path / subpackage_name. + standalone : bool + """ + + if standalone: + parent_name = None + else: + parent_name = self.name + config_list = self.get_subpackage(subpackage_name, subpackage_path, + parent_name = parent_name, + caller_level = 2) + if not config_list: + self.warn('No configuration returned, assuming unavailable.') + for config in config_list: + d = config + if isinstance(config, Configuration): + d = config.todict() + assert isinstance(d, dict), repr(type(d)) + + self.info('Appending %s configuration to %s' \ + % (d.get('name'), self.name)) + self.dict_append(**d) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a subpackage '+ subpackage_name) + + def add_data_dir(self, data_path): + """Recursively add files under data_path to data_files list. + + Recursively add files under data_path to the list of data_files to be + installed (and distributed). The data_path can be either a relative + path-name, or an absolute path-name, or a 2-tuple where the first + argument shows where in the install directory the data directory + should be installed to. + + Parameters + ---------- + data_path : seq or str + Argument can be either + + * 2-sequence (, ) + * path to data directory where python datadir suffix defaults + to package dir. 
+
+        Notes
+        -----
+        Rules for installation paths::
+
+            foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+            (gun, foo/bar) -> parent/gun
+            foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
+            (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+            /foo/bar -> (bar, /foo/bar) -> parent/bar
+            (gun, /foo/bar) -> parent/gun
+            (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+        Examples
+        --------
+        For example suppose the source directory contains fun/foo.dat and
+        fun/bar/car.dat:
+
+        >>> self.add_data_dir('fun')                       #doctest: +SKIP
+        >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
+        >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+
+        Will install data-files to the locations::
+
+            <package install directory>/
+              fun/
+                foo.dat
+                bar/
+                  car.dat
+              sun/
+                foo.dat
+                bar/
+                  car.dat
+              gun/
+                foo.dat
+                car.dat
+
+        """
+        if is_sequence(data_path):
+            d, data_path = data_path
+        else:
+            d = None
+        if is_sequence(data_path):
+            [self.add_data_dir((d, p)) for p in data_path]
+            return
+        if not is_string(data_path):
+            raise TypeError("not a string: %r" % (data_path,))
+        if d is None:
+            if os.path.isabs(data_path):
+                return self.add_data_dir((os.path.basename(data_path), data_path))
+            return self.add_data_dir((data_path, data_path))
+        paths = self.paths(data_path, include_non_existing=False)
+        if is_glob_pattern(data_path):
+            if is_glob_pattern(d):
+                pattern_list = allpath(d).split(os.sep)
+                pattern_list.reverse()
+                # /a/*//b/ -> /a/*/b
+                rl = list(range(len(pattern_list)-1)); rl.reverse()
+                for i in rl:
+                    if not pattern_list[i]:
+                        del pattern_list[i]
+                #
+                for path in paths:
+                    if not os.path.isdir(path):
+                        print('Not a directory, skipping', path)
+                        continue
+                    rpath = rel_path(path, self.local_path)
+                    path_list = rpath.split(os.sep)
+                    path_list.reverse()
+                    target_list = []
+                    i = 0
+                    for s in pattern_list:
+                        if is_glob_pattern(s):
+                            if i>=len(path_list):
+                                raise ValueError('cannot fill pattern %r with %r' \
+                                                 % (d, path))
+                            target_list.append(path_list[i])
+                        else:
+                            assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
+                            target_list.append(s)
+                        i += 1
+                    if path_list[i:]:
+                        self.warn('mismatch of pattern_list=%s and path_list=%s'\
+                                  % (pattern_list, path_list))
+                    target_list.reverse()
+                    self.add_data_dir((os.sep.join(target_list), path))
+            else:
+                for path in paths:
+                    self.add_data_dir((d, path))
+            return
+        assert not is_glob_pattern(d), repr(d)
+
+        dist = self.get_distribution()
+        if dist is not None and dist.data_files is not None:
+            data_files = dist.data_files
+        else:
+            data_files = self.data_files
+
+        for path in paths:
+            for d1, f in list(general_source_directories_files(path)):
+                target_path = os.path.join(self.path_in_package, d, d1)
+                data_files.append((target_path, f))
+
+    def _optimize_data_files(self):
+        data_dict = {}
+        for p, files in self.data_files:
+            if p not in data_dict:
+                data_dict[p] = set()
+            for f in files:
+                data_dict[p].add(f)
+        self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
+
+    def add_data_files(self,*files):
+        """Add data files to configuration data_files.
+
+        Parameters
+        ----------
+        files : sequence
+            Argument(s) can be either
+
+                * 2-sequence (<datadir prefix>, <path to data file(s)>)
+                * paths to data files where python datadir prefix defaults
+                  to package dir.
+
+        Notes
+        -----
+        The form of each element of the files sequence is very flexible
+        allowing many combinations of where to get the files from the package
+        and where they should ultimately be installed on the system.
+        The most basic usage is for an element of the files argument sequence
+        to be a simple filename. This will cause that file from the local path
+        to be installed to the installation path of the self.name package
+        (package path). The file argument can also be a relative path in which
+        case the entire relative path will be installed into the package
+        directory. Finally, the file can be an absolute path name in which
+        case the file will be found at the absolute path name but installed to
+        the package path.
+
+        This basic behavior can be augmented by passing a 2-tuple in as the
+        file argument. The first element of the tuple should specify the
+        relative path (under the package install directory) where the
+        remaining sequence of files should be installed to (it has nothing to
+        do with the file-names in the source distribution). The second element
+        of the tuple is the sequence of files that should be installed. The
+        files in this sequence can be filenames, relative paths, or absolute
+        paths. For absolute paths the file will be installed in the top-level
+        package installation directory (regardless of the first argument).
+        Filenames and relative path names will be installed in the package
+        install directory under the path name given as the first element of
+        the tuple.
+
+        Rules for installation paths:
+
+          #. file.txt -> (., file.txt)-> parent/file.txt
+          #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+          #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+          #. ``*``.txt -> parent/a.txt, parent/b.txt
+          #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+          #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
+          #. (sun, file.txt) -> parent/sun/file.txt
+          #. (sun, bar/file.txt) -> parent/sun/file.txt
+          #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+          #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+          #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+          #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+        An additional feature is that the path to a data-file can actually be
+        a function that takes no arguments and returns the actual path(s) to
+        the data-files. This is useful when the data files are generated while
+        building the package; a short sketch appears at the end of this
+        docstring.
+
+        Examples
+        --------
+        Add files to the list of data_files to be included with the package.
+
+        >>> self.add_data_files('foo.dat',
+        ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+        ...     'bar/cat.dat',
+        ...     '/full/path/to/can.dat') #doctest: +SKIP
+
+        will install these data files to::
+
+            <package install directory>/
+              foo.dat
+              fun/
+                gun.dat
+                nun/
+                  pun.dat
+              sun.dat
+              bar/
+                cat.dat
+              can.dat
+
+        where <package install directory> is the package (or sub-package)
+        directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
+        \\Python2.4 \\Lib \\site-packages \\mypackage') or
+        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage' ('C:
+        \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
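+
+        A callable entry is also accepted; a minimal sketch (the generator
+        name ``generate_table`` and the file it writes are hypothetical)::
+
+            def generate_table():
+                # produced at build time; the returned path is recorded
+                with open('table.dat', 'w') as f:
+                    f.write('1 2 3\n')
+                return 'table.dat'
+
+            config.add_data_files(('tables', [generate_table]))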
+ """ + + if len(files)>1: + for f in files: + self.add_data_files(f) + return + assert len(files)==1 + if is_sequence(files[0]): + d, files = files[0] + else: + d = None + if is_string(files): + filepat = files + elif is_sequence(files): + if len(files)==1: + filepat = files[0] + else: + for f in files: + self.add_data_files((d, f)) + return + else: + raise TypeError(repr(type(files))) + + if d is None: + if hasattr(filepat, '__call__'): + d = '' + elif os.path.isabs(filepat): + d = '' + else: + d = os.path.dirname(filepat) + self.add_data_files((d, files)) + return + + paths = self.paths(filepat, include_non_existing=False) + if is_glob_pattern(filepat): + if is_glob_pattern(d): + pattern_list = d.split(os.sep) + pattern_list.reverse() + for path in paths: + path_list = path.split(os.sep) + path_list.reverse() + path_list.pop() # filename + target_list = [] + i = 0 + for s in pattern_list: + if is_glob_pattern(s): + target_list.append(path_list[i]) + i += 1 + else: + target_list.append(s) + target_list.reverse() + self.add_data_files((os.sep.join(target_list), path)) + else: + self.add_data_files((d, paths)) + return + assert not is_glob_pattern(d), repr((d, filepat)) + + dist = self.get_distribution() + if dist is not None and dist.data_files is not None: + data_files = dist.data_files + else: + data_files = self.data_files + + data_files.append((os.path.join(self.path_in_package, d), paths)) + + ### XXX Implement add_py_modules + + def add_define_macros(self, macros): + """Add define macros to configuration + + Add the given sequence of macro name and value duples to the beginning + of the define_macros list This list will be visible to all extension + modules of the current package. + """ + dist = self.get_distribution() + if dist is not None: + if not hasattr(dist, 'define_macros'): + dist.define_macros = [] + dist.define_macros.extend(macros) + else: + self.define_macros.extend(macros) + + + def add_include_dirs(self,*paths): + """Add paths to configuration include directories. + + Add the given sequence of paths to the beginning of the include_dirs + list. This list will be visible to all extension modules of the + current package. + """ + include_dirs = self.paths(paths) + dist = self.get_distribution() + if dist is not None: + if dist.include_dirs is None: + dist.include_dirs = [] + dist.include_dirs.extend(include_dirs) + else: + self.include_dirs.extend(include_dirs) + + def add_headers(self,*files): + """Add installable headers to configuration. + + Add the given sequence of files to the beginning of the headers list. + By default, headers will be installed under // directory. If an item of files + is a tuple, then its first argument specifies the actual installation + location relative to the path. + + Parameters + ---------- + files : str or seq + Argument(s) can be either: + + * 2-sequence (,) + * path(s) to header file(s) where python includedir suffix will + default to package name. + """ + headers = [] + for path in files: + if is_string(path): + [headers.append((self.name, p)) for p in self.paths(path)] + else: + if not isinstance(path, (tuple, list)) or len(path) != 2: + raise TypeError(repr(path)) + [headers.append((path[0], p)) for p in self.paths(path[1])] + dist = self.get_distribution() + if dist is not None: + if dist.headers is None: + dist.headers = [] + dist.headers.extend(headers) + else: + self.headers.extend(headers) + + def paths(self,*paths,**kws): + """Apply glob to paths and prepend local_path if needed. + + Applies glob.glob(...) 
to each path in the sequence (if needed) and + pre-pends the local_path if needed. Because this is called on all + source lists, this allows wildcard characters to be specified in lists + of sources for extension modules and libraries and scripts and allows + path-names be relative to the source directory. + + """ + include_non_existing = kws.get('include_non_existing', True) + return gpaths(paths, + local_path = self.local_path, + include_non_existing=include_non_existing) + + def _fix_paths_dict(self, kw): + for k in kw.keys(): + v = kw[k] + if k in ['sources', 'depends', 'include_dirs', 'library_dirs', + 'module_dirs', 'extra_objects']: + new_v = self.paths(v) + kw[k] = new_v + + def add_extension(self,name,sources,**kw): + """Add extension to configuration. + + Create and add an Extension instance to the ext_modules list. This + method also takes the following optional keyword arguments that are + passed on to the Extension constructor. + + Parameters + ---------- + name : str + name of the extension + sources : seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + include_dirs : + define_macros : + undef_macros : + library_dirs : + libraries : + runtime_library_dirs : + extra_objects : + extra_compile_args : + extra_link_args : + extra_f77_compile_args : + extra_f90_compile_args : + export_symbols : + swig_opts : + depends : + The depends list contains paths to files or directories that the + sources of the extension module depend on. If any path in the + depends list is newer than the extension module, then the module + will be rebuilt. + language : + f2py_options : + module_dirs : + extra_info : dict or list + dict or list of dict of keywords to be appended to keywords. + + Notes + ----- + The self.paths(...) method is applied to all lists that may contain + paths. 
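+
+        Examples
+        --------
+        An illustrative sketch (the extension name, sources, and macro are
+        hypothetical):
+
+        >>> config.add_extension('_spam',
+        ...                      sources=['spamsrc/*.c'],
+        ...                      include_dirs=['spamsrc'],
+        ...                      define_macros=[('SPAM_LEVEL', '3')]) #doctest: +SKIP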
+ """ + ext_args = copy.copy(kw) + ext_args['name'] = dot_join(self.name, name) + ext_args['sources'] = sources + + if 'extra_info' in ext_args: + extra_info = ext_args['extra_info'] + del ext_args['extra_info'] + if isinstance(extra_info, dict): + extra_info = [extra_info] + for info in extra_info: + assert isinstance(info, dict), repr(info) + dict_append(ext_args,**info) + + self._fix_paths_dict(ext_args) + + # Resolve out-of-tree dependencies + libraries = ext_args.get('libraries', []) + libnames = [] + ext_args['libraries'] = [] + for libname in libraries: + if isinstance(libname, tuple): + self._fix_paths_dict(libname[1]) + + # Handle library names of the form libname@relative/path/to/library + if '@' in libname: + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) + if os.path.isdir(lpath): + c = self.get_subpackage(None, lpath, + caller_level = 2) + if isinstance(c, Configuration): + c = c.todict() + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] + if llname == lname: + c.pop('name', None) + dict_append(ext_args,**c) + break + continue + libnames.append(libname) + + ext_args['libraries'] = libnames + ext_args['libraries'] + ext_args['define_macros'] = \ + self.define_macros + ext_args.get('define_macros', []) + + from numpy.distutils.core import Extension + ext = Extension(**ext_args) + self.ext_modules.append(ext) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add an extension '+name) + return ext + + def add_library(self,name,sources,**build_info): + """ + Add library to configuration. + + Parameters + ---------- + name : str + Name of the extension. + sources : sequence + List of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" + build_info = copy.copy(build_info) + build_info['sources'] = sources + + # Sometimes, depends is not set up to an empty list by default, and if + # depends is not given to add_library, distutils barfs (#1134) + if not 'depends' in build_info: + build_info['depends'] = [] + + self._fix_paths_dict(build_info) + + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """ + Similar to add_library, but the specified library is installed. 
+ + Most C libraries used with ``distutils`` are only used to build python + extensions, but libraries built through this method will be installed + so that they can be reused by third-party packages. + + Parameters + ---------- + name : str + Name of the installed library. + sources : sequence + List of the library's source files. See `add_library` for details. + install_dir : str + Path to install the library, relative to the current sub-package. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + Returns + ------- + None + + See Also + -------- + add_library, add_npy_pkg_config, get_info + + Notes + ----- + The best way to encode the options required to link against the specified + C libraries is to use a "libname.ini" file, and use `get_info` to + retrieve the required options (see `add_npy_pkg_config` for more + information). + + """ + if not build_info: + build_info = {} + + install_dir = os.path.join(self.package_path, install_dir) + self._add_library(name, sources, install_dir, build_info) + self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) + + def add_npy_pkg_config(self, template, install_dir, subst_dict=None): + """ + Generate and install a npy-pkg config file from a template. + + The config file generated from `template` is installed in the + given install directory, using `subst_dict` for variable substitution. + + Parameters + ---------- + template : str + The path of the template, relatively to the current package path. + install_dir : str + Where to install the npy-pkg config file, relatively to the current + package path. + subst_dict : dict, optional + If given, any string of the form ``@key@`` will be replaced by + ``subst_dict[key]`` in the template file when installed. The install + prefix is always available through the variable ``@prefix@``, since the + install prefix is not easy to get reliably from setup.py. + + See also + -------- + add_installed_library, get_info + + Notes + ----- + This works for both standard installs and in-place builds, i.e. the + ``@prefix@`` refer to the source directory for in-place builds. + + Examples + -------- + :: + + config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) + + Assuming the foo.ini.in file has the following content:: + + [meta] + Name=@foo@ + Version=1.0 + Description=dummy description + + [default] + Cflags=-I@prefix@/include + Libs= + + The generated file will have the following content:: + + [meta] + Name=bar + Version=1.0 + Description=dummy description + + [default] + Cflags=-Iprefix_dir/include + Libs= + + and will be installed as foo.ini in the 'lib' subpath. + + When cross-compiling with numpy distutils, it might be necessary to + use modified npy-pkg-config files. Using the default/generated files + will link with the host libraries (i.e. libnpymath.a). For + cross-compilation you of-course need to link with target libraries, + while using the host Python installation. + + You can copy out the numpy/_core/lib/npy-pkg-config directory, add a + pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment + variable to point to the directory with the modified npy-pkg-config + files. 
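+
+        For instance, a minimal sketch of activating such an override before
+        the build (the directory path here is hypothetical)::
+
+            import os
+            os.environ['NPY_PKG_CONFIG_PATH'] = '/path/to/modified/npy-pkg-config'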
+ + Example npymath.ini modified for cross-compilation:: + + [meta] + Name=npymath + Description=Portable, core math library implementing C99 standard + Version=0.1 + + [variables] + pkgname=numpy._core + pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/_core + prefix=${pkgdir} + libdir=${prefix}/lib + includedir=${prefix}/include + + [default] + Libs=-L${libdir} -lnpymath + Cflags=-I${includedir} + Requires=mlib + + [msvc] + Libs=/LIBPATH:${libdir} npymath.lib + Cflags=/INCLUDE:${includedir} + Requires=mlib + + """ + if subst_dict is None: + subst_dict = {} + template = os.path.join(self.package_path, template) + + if self.name in self.installed_pkg_config: + self.installed_pkg_config[self.name].append((template, install_dir, + subst_dict)) + else: + self.installed_pkg_config[self.name] = [(template, install_dir, + subst_dict)] + + + def add_scripts(self,*files): + """Add scripts to configuration. + + Add the sequence of files to the beginning of the scripts list. + Scripts will be installed under the /bin/ directory. + + """ + scripts = self.paths(files) + dist = self.get_distribution() + if dist is not None: + if dist.scripts is None: + dist.scripts = [] + dist.scripts.extend(scripts) + else: + self.scripts.extend(scripts) + + def dict_append(self,**dict): + for key in self.list_keys: + a = getattr(self, key) + a.extend(dict.get(key, [])) + for key in self.dict_keys: + a = getattr(self, key) + a.update(dict.get(key, {})) + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for key in dict.keys(): + if key not in known_keys: + a = getattr(self, key, None) + if a and a==dict[key]: continue + self.warn('Inheriting attribute %r=%r from %r' \ + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) + self.extra_keys.append(key) + elif key in self.extra_keys: + self.info('Ignoring attempt to set %r (from %r to %r)' \ + % (key, getattr(self, key), dict[key])) + elif key in known_keys: + # key is already processed above + pass + else: + raise ValueError("Don't know about key=%r" % (key)) + + def __str__(self): + from pprint import pformat + known_keys = self.list_keys + self.dict_keys + self.extra_keys + s = '<'+5*'-' + '\n' + s += 'Configuration of '+self.name+':\n' + known_keys.sort() + for k in known_keys: + a = getattr(self, k, None) + if a: + s += '%s = %s\n' % (k, pformat(a)) + s += 5*'-' + '>' + return s + + def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ + cmd = get_cmd('config') + cmd.ensure_finalized() + cmd.dump_source = 0 + cmd.noisy = 0 + old_path = os.environ.get('PATH') + if old_path: + path = os.pathsep.join(['.', old_path]) + os.environ['PATH'] = path + return cmd + + def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ + cmd = get_cmd('build') + cmd.ensure_finalized() + return cmd.build_temp + + def have_f77c(self): + """Check for availability of Fortran 77 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') + return flag + + def have_f90c(self): + """Check for availability of Fortran 90 compiler. 
+
+        Use it inside source generating function to ensure that
+        setup distribution instance has been initialized.
+
+        Notes
+        -----
+        True if a Fortran 90 compiler is available (because a simple Fortran
+        90 code was able to be compiled successfully)
+        """
+        simple_fortran_subroutine = '''
+        subroutine simple
+        end
+        '''
+        config_cmd = self.get_config_cmd()
+        flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
+        return flag
+
+    def append_to(self, extlib):
+        """Append libraries, include_dirs to extension or library item.
+        """
+        if is_sequence(extlib):
+            lib_name, build_info = extlib
+            dict_append(build_info,
+                        libraries=self.libraries,
+                        include_dirs=self.include_dirs)
+        else:
+            from numpy.distutils.core import Extension
+            assert isinstance(extlib, Extension), repr(extlib)
+            extlib.libraries.extend(self.libraries)
+            extlib.include_dirs.extend(self.include_dirs)
+
+    def _get_svn_revision(self, path):
+        """Return path's SVN revision number.
+        """
+        try:
+            output = subprocess.check_output(['svnversion'], cwd=path)
+        except (subprocess.CalledProcessError, OSError):
+            pass
+        else:
+            m = re.match(rb'(?P<revision>\d+)', output)
+            if m:
+                return int(m.group('revision'))
+
+        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
+            entries = njoin(path, '_svn', 'entries')
+        else:
+            entries = njoin(path, '.svn', 'entries')
+        if os.path.isfile(entries):
+            with open(entries) as f:
+                fstr = f.read()
+            if fstr[:5] == '<?xml':  # pre 1.4
+                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
+                if m:
+                    return int(m.group('revision'))
+            else:  # non-xml entries file --- check to be sure that
+                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
+                if m:
+                    return int(m.group('revision'))
+        return None
+
+    def _get_hg_revision(self, path):
+        """Return path's Mercurial revision number.
+        """
+        try:
+            output = subprocess.check_output(
+                ['hg', 'identify', '--num'], cwd=path)
+        except (subprocess.CalledProcessError, OSError):
+            pass
+        else:
+            m = re.match(rb'(?P<revision>\d+)', output)
+            if m:
+                return int(m.group('revision'))
+
+        branch_fn = njoin(path, '.hg', 'branch')
+        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
+
+        if os.path.isfile(branch_fn):
+            branch0 = None
+            with open(branch_fn) as f:
+                revision0 = f.read().strip()
+
+            branch_map = {}
+            with open(branch_cache_fn) as f:
+                for line in f:
+                    branch1, revision1 = line.split()[:2]
+                    if revision1==revision0:
+                        branch0 = branch1
+                    try:
+                        revision1 = int(revision1)
+                    except ValueError:
+                        continue
+                    branch_map[branch1] = revision1
+
+            return branch_map.get(branch0)
+
+        return None
+
+
+    def get_version(self, version_file=None, version_variable=None):
+        """Try to get version string of a package.
+
+        Return a version string of the current package or None if the version
+        information could not be detected.
+
+        Notes
+        -----
+        This method scans files named
+        __version__.py, <packagename>_version.py, version.py, and
+        __svn_version__.py for string variables version, __version__, and
+        <packagename>_version, until a version number is found.
+        """
+        version = getattr(self, 'version', None)
+        if version is not None:
+            return version
+
+        # Get version from version file.
+ if version_file is None: + files = ['__version__.py', + self.name.split('.')[-1]+'_version.py', + 'version.py', + '__svn_version__.py', + '__hg_version__.py'] + else: + files = [version_file] + if version_variable is None: + version_vars = ['version', + '__version__', + self.name.split('.')[-1]+'_version'] + else: + version_vars = [version_variable] + for f in files: + fn = njoin(self.local_path, f) + if os.path.isfile(fn): + info = ('.py', 'U', 1) + name = os.path.splitext(os.path.basename(fn))[0] + n = dot_join(self.name, name) + try: + version_module = exec_mod_from_location( + '_'.join(n.split('.')), fn) + except ImportError as e: + self.warn(str(e)) + version_module = None + if version_module is None: + continue + + for a in version_vars: + version = getattr(version_module, a, None) + if version is not None: + break + + # Try if versioneer module + try: + version = version_module.get_versions()['version'] + except AttributeError: + pass + + if version is not None: + break + + if version is not None: + self.version = version + return version + + # Get version as SVN or Mercurial revision number + revision = self._get_svn_revision(self.local_path) + if revision is None: + revision = self._get_hg_revision(self.local_path) + + if revision is not None: + version = str(revision) + self.version = version + + return version + + def make_svn_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __svn_version__.py file to the current package directory. + + Generate package __svn_version__.py file from SVN revision number, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __svn_version__.py existed before, nothing is done. + + This is + intended for working with source directories that are in an SVN + repository. + """ + target = njoin(self.local_path, '__svn_version__.py') + revision = self._get_svn_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_svn_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_svn_version_py())) + + def make_hg_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __hg_version__.py file to the current package directory. + + Generate package __hg_version__.py file from Mercurial revision, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __hg_version__.py existed before, nothing is done. + + This is intended for working with source directories that are + in an Mercurial repository. 
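+
+        Examples
+        --------
+        A minimal sketch; this is a no-op unless run from a Mercurial
+        working copy:
+
+        >>> config.make_hg_version_py() #doctest: +SKIP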
+ """ + target = njoin(self.local_path, '__hg_version__.py') + revision = self._get_hg_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_hg_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_hg_version_py())) + + def make_config_py(self,name='__config__'): + """Generate package __config__.py file containing system_info + information used during building the package. + + This file is installed to the + package installation directory. + + """ + self.py_modules.append((self.name, name, generate_config_py)) + + def get_info(self,*names): + """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. + """ + from .system_info import get_info, dict_append + info_dict = {} + for a in names: + dict_append(info_dict,**get_info(a)) + return info_dict + + +def get_cmd(cmdname, _cache={}): + if cmdname not in _cache: + import distutils.core + dist = distutils.core._setup_distribution + if dist is None: + from distutils.errors import DistutilsInternalError + raise DistutilsInternalError( + 'setup distribution instance not initialized') + cmd = dist.get_command_obj(cmdname) + _cache[cmdname] = cmd + return _cache[cmdname] + +def get_numpy_include_dirs(): + # numpy_include_dirs are set by numpy/_core/setup.py, otherwise [] + include_dirs = Configuration.numpy_include_dirs[:] + if not include_dirs: + import numpy + include_dirs = [ numpy.get_include() ] + # else running numpy/_core/setup.py + return include_dirs + +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory. + + If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that + is returned. Otherwise, a path inside the location of the numpy module is + returned. + + The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining + customized npy-pkg-config .ini files for the cross-compilation + environment, and using them when cross-compiling. + + """ + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d is not None: + return d + spec = importlib.util.find_spec('numpy') + d = os.path.join(os.path.dirname(spec.origin), + '_core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """ + Return library info for the given package. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. 
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_info
+
+    """
+    from numpy.distutils.npy_pkg_config import read_config
+
+    if dirs:
+        dirs.append(get_npy_pkg_dir())
+    else:
+        dirs = [get_npy_pkg_dir()]
+    return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+    """
+    Return an info dict for a given C library.
+
+    The info dict contains the necessary options to use the C library.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of additional directories where to look
+        for npy-pkg-config files. Those directories are searched prior to the
+        NumPy directory.
+
+    Returns
+    -------
+    info : dict
+        The dictionary with build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_pkg_info
+
+    Examples
+    --------
+    To get the necessary information for the npymath library from NumPy:
+
+    >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+    >>> npymath_info #doctest: +SKIP
+    {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+    ['.../numpy/_core/lib'], 'include_dirs': ['.../numpy/_core/include']}
+
+    This info dict can then be used as input to a `Configuration` instance::
+
+      config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+    """
+    from numpy.distutils.npy_pkg_config import parse_flags
+    pkg_info = get_pkg_info(pkgname, dirs)
+
+    # Translate LibraryInfo instance into a build_info dict
+    info = parse_flags(pkg_info.cflags())
+    for k, v in parse_flags(pkg_info.libs()).items():
+        info[k].extend(v)
+
+    # add_extension extra_info argument is ANAL
+    info['define_macros'] = info['macros']
+    del info['macros']
+    del info['ignored']
+
+    return info
+
+def is_bootstrapping():
+    import builtins
+
+    try:
+        builtins.__NUMPY_SETUP__
+        return True
+    except AttributeError:
+        return False
+
+
+#########################
+
+def default_config_dict(name = None, parent_name = None, local_path=None):
+    """Return a configuration dictionary for usage in
+    configuration() function defined in file setup_<name>.py.
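+
+    Deprecated helper; an illustrative sketch (the package names are
+    hypothetical):
+
+    >>> d = default_config_dict('fft', 'mypkg') #doctest: +SKIP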
+ """ + import warnings + warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ + 'deprecated default_config_dict(%r,%r,%r)' + % (name, parent_name, local_path, + name, parent_name, local_path, + ), stacklevel=2) + c = Configuration(name, parent_name, local_path) + return c.todict() + + +def dict_append(d, **kws): + for k, v in kws.items(): + if k in d: + ov = d[k] + if isinstance(ov, str): + d[k] = v + else: + d[k].extend(v) + else: + d[k] = v + +def appendpath(prefix, path): + if os.path.sep != '/': + prefix = prefix.replace('/', os.path.sep) + path = path.replace('/', os.path.sep) + drive = '' + if os.path.isabs(path): + drive = os.path.splitdrive(prefix)[0] + absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] + pathdrive, path = os.path.splitdrive(path) + d = os.path.commonprefix([absprefix, path]) + if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ + or os.path.join(path[:len(d)], path[len(d):]) != path: + # Handle invalid paths + d = os.path.dirname(d) + subpath = path[len(d):] + if os.path.isabs(subpath): + subpath = subpath[1:] + else: + subpath = path + return os.path.normpath(njoin(drive + prefix, subpath)) + +def generate_config_py(target): + """Generate config.py file containing system_info information + used during building the package. + + Usage: + config['py_modules'].append((packagename, '__config__',generate_config_py)) + """ + from numpy.distutils.system_info import system_info + from distutils.dir_util import mkpath + mkpath(os.path.dirname(target)) + with open(target, 'w') as f: + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') + + # For gfortran+msvc combination, extra shared libraries may exist + f.write(textwrap.dedent(""" + import os + import sys + + extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + + if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.add_dll_directory(extra_dll_dir) + + """)) + + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(textwrap.dedent(r''' + def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + + def show(): + """ + Show libraries in the system on which NumPy was built. + + Print information about various resources (libraries, library + directories, include directories, etc.) in the system on which + NumPy was built. + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. Classes specifying the information to be printed are defined + in the `numpy.distutils.system_info` module. + + Information may include: + + * ``language``: language used to write the libraries (mostly + C or f77) + * ``libraries``: names of libraries found in the system + * ``library_dirs``: directories containing the libraries + * ``include_dirs``: directories containing library header files + * ``src_dirs``: directories containing library source files + * ``define_macros``: preprocessor macros used by + ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system + + 2. 
NumPy BLAS/LAPACK Installation Notes + + Installing a numpy wheel (``pip install numpy`` or force it + via ``pip install numpy --only-binary :numpy: numpy``) includes + an OpenBLAS implementation of the BLAS and LAPACK linear algebra + APIs. In this case, ``library_dirs`` reports the original build + time configuration as compiled with gcc/gfortran; at run time + the OpenBLAS library is in + ``site-packages/numpy.libs/`` (linux), or + ``site-packages/numpy/.dylibs/`` (macOS), or + ``site-packages/numpy/.libs/`` (windows). + + Installing numpy from source + (``pip install numpy --no-binary numpy``) searches for BLAS and + LAPACK dynamic link libraries at build time as influenced by + environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and + NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; + or the optional file ``~/.numpy-site.cfg``. + NumPy remembers those locations and expects to load the same + libraries at run-time. + In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS + library) is in the default build-time search order after + 'openblas'. + + Examples + -------- + >>> import numpy as np + >>> np.show_config() + blas_opt_info: + language = c + define_macros = [('HAVE_CBLAS', None)] + libraries = ['openblas', 'openblas'] + library_dirs = ['/usr/local/lib'] + """ + from numpy._core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + + ''')) + + return target + +def msvc_version(compiler): + """Return version major and minor of compiler instance if it is + MSVC, raise an exception otherwise.""" + if not compiler.compiler_type == "msvc": + raise ValueError("Compiler instance is not msvc (%s)"\ + % compiler.compiler_type) + return compiler._MSVCCompiler__version + +def get_build_architecture(): + # Importing distutils.msvccompiler triggers a warning on non-Windows + # systems, so delay the import to here. + from distutils.msvccompiler import get_build_architecture + return get_build_architecture() + + +_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} + + +def sanitize_cxx_flags(cxxflags): + ''' + Some flags are valid for C but not C++. Prune them. + ''' + return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] + + +def exec_mod_from_location(modname, modfile): + ''' + Use importlib machinery to import a module `modname` from the file + `modfile`. Depending on the `spec.loader`, the module may not be + registered in sys.modules. 
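+
+    A minimal usage sketch (the module name and path are illustrative)::
+
+        mod = exec_mod_from_location('conv_template',
+                                     'numpy/distutils/conv_template.py')
+        # mod is a regular module object; mod.__name__ == 'conv_template'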
+ ''' + spec = importlib.util.spec_from_file_location(modname, modfile) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo diff --git a/phivenv/Lib/site-packages/numpy/distutils/msvc9compiler.py b/phivenv/Lib/site-packages/numpy/distutils/msvc9compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..1e44d9379dedcd799aa2897cdce3300e6796f170 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/msvc9compiler.py @@ -0,0 +1,63 @@ +import os +from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if not old: + return new + if new in old: + return old + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self, plat_name=None): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib') + environ_include = os.getenv('include') + _MSVCCompiler.initialize(self, plat_name) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): + ld_args.append('/MANIFEST') + _MSVCCompiler.manifest_setup_ldargs(self, output_filename, + build_temp, ld_args) diff --git a/phivenv/Lib/site-packages/numpy/distutils/msvccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/msvccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b243f4280ad3046ae8e315b0a8b4cbba4c528202 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/msvccompiler.py @@ -0,0 +1,76 @@ +import os +from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. 
+ new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if new in old: + return old + if not old: + return new + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib', '') + environ_include = os.getenv('include', '') + _MSVCCompiler.initialize(self) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + +def lib_opts_if_msvc(build_cmd): + """ Add flags if we are using MSVC compiler + + We can't see `build_cmd` in our scope, because we have not initialized + the distutils build command, so use this deferred calculation to run + when we are building the library. + """ + if build_cmd.compiler.compiler_type != 'msvc': + return [] + # Explicitly disable whole-program optimization. + flags = ['/GL-'] + # Disable voltbl section for vc142 to allow link using mingw-w64; see: + # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 + if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): + flags.append('-d2VolatileMetadata-') + return flags diff --git a/phivenv/Lib/site-packages/numpy/distutils/npy_pkg_config.py b/phivenv/Lib/site-packages/numpy/distutils/npy_pkg_config.py new file mode 100644 index 0000000000000000000000000000000000000000..2e5912a0ef3eaf10faf137c7fb8f12ac35b400ef --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,441 @@ +import sys +import re +import os + +from configparser import RawConfigParser + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(OSError): + """ + Exception thrown when there is a problem parsing a configuration file. + + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(OSError): + """Exception raised when a package can not be located.""" + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + """ + Parse a line from a config file containing compile flags. + + Parameters + ---------- + line : str + A single line containing one or more compile flags. + + Returns + ------- + d : dict + Dictionary of parsed flags, split into relevant categories. 
+        These categories are the keys of `d`:
+
+        * 'include_dirs'
+        * 'library_dirs'
+        * 'libraries'
+        * 'macros'
+        * 'ignored'
+
+    """
+    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+         'macros': [], 'ignored': []}
+
+    flags = (' ' + line).split(' -')
+    for flag in flags:
+        flag = '-' + flag
+        if len(flag) > 0:
+            if flag.startswith('-I'):
+                d['include_dirs'].append(flag[2:].strip())
+            elif flag.startswith('-L'):
+                d['library_dirs'].append(flag[2:].strip())
+            elif flag.startswith('-l'):
+                d['libraries'].append(flag[2:].strip())
+            elif flag.startswith('-D'):
+                d['macros'].append(flag[2:].strip())
+            else:
+                d['ignored'].append(flag)
+
+    return d
+
+def _escape_backslash(val):
+    return val.replace('\\', '\\\\')
+
+class LibraryInfo:
+    """
+    Object containing build information about a library.
+
+    Parameters
+    ----------
+    name : str
+        The library name.
+    description : str
+        Description of the library.
+    version : str
+        Version string.
+    sections : dict
+        The sections of the configuration file for the library. The keys are
+        the section headers, the values the text under each header.
+    vars : class instance
+        A `VariableSet` instance, which contains ``(name, value)`` pairs for
+        variables defined in the configuration file for the library.
+    requires : sequence, optional
+        The required libraries for the library to be installed.
+
+    Notes
+    -----
+    All input parameters (except "sections" which is a method) are available as
+    attributes of the same name.
+
+    """
+    def __init__(self, name, description, version, sections, vars, requires=None):
+        self.name = name
+        self.description = description
+        if requires:
+            self.requires = requires
+        else:
+            self.requires = []
+        self.version = version
+        self._sections = sections
+        self.vars = vars
+
+    def sections(self):
+        """
+        Return the section headers of the config file.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list of str
+            The list of section headers.
+
+        """
+        return list(self._sections.keys())
+
+    def cflags(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['cflags'])
+        return _escape_backslash(val)
+
+    def libs(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['libs'])
+        return _escape_backslash(val)
+
+    def __str__(self):
+        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
+        # List the required libraries when there are any; previously the
+        # branches were inverted and the names were never printed.
+        if self.requires:
+            m.append('Requires: %s' % ",".join(self.requires))
+        else:
+            m.append('Requires:')
+        m.append('Version: %s' % self.version)
+
+        return "\n".join(m)
+
+class VariableSet:
+    """
+    Container object for the variables defined in a config file.
+
+    `VariableSet` can be used as a plain dictionary, with the variable names
+    as keys.
+
+    Parameters
+    ----------
+    d : dict
+        Dict of items in the "variables" section of the configuration file.
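+
+    Examples
+    --------
+    A small sketch (the variable values are illustrative); interpolation
+    is applied repeatedly until no ``${var}`` references remain::
+
+        vs = VariableSet({'prefix': '/usr/local',
+                          'libdir': '${prefix}/lib'})
+        vs.interpolate('-L${libdir}')   # -> '-L/usr/local/lib'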
+
+    """
+    def __init__(self, d):
+        self._raw_data = dict([(k, v) for k, v in d.items()])
+
+        self._re = {}
+        self._re_sub = {}
+
+        self._init_parse()
+
+    def _init_parse(self):
+        for k, v in self._raw_data.items():
+            self._init_parse_var(k, v)
+
+    def _init_parse_var(self, name, value):
+        self._re[name] = re.compile(r'\$\{%s\}' % name)
+        self._re_sub[name] = value
+
+    def interpolate(self, value):
+        # Brute force: we keep interpolating until there is no '${var}' left
+        # or until the interpolated string is equal to the input string
+        def _interpolate(value):
+            for k in self._re.keys():
+                value = self._re[k].sub(self._re_sub[k], value)
+            return value
+        while _VAR.search(value):
+            nvalue = _interpolate(value)
+            if nvalue == value:
+                break
+            value = nvalue
+
+        return value
+
+    def variables(self):
+        """
+        Return the list of variable names.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        names : list of str
+            The names of all variables in the `VariableSet` instance.
+
+        """
+        return list(self._raw_data.keys())
+
+    # Emulate a dict to set/get variables values
+    def __getitem__(self, name):
+        return self._raw_data[name]
+
+    def __setitem__(self, name, value):
+        self._raw_data[name] = value
+        self._init_parse_var(name, value)
+
+def parse_meta(config):
+    if not config.has_section('meta'):
+        raise FormatError("No meta section found!")
+
+    d = dict(config.items('meta'))
+
+    for k in ['name', 'description', 'version']:
+        if k not in d:
+            raise FormatError("Option %s (section [meta]) is mandatory, "
+                              "but not found" % k)
+
+    if 'requires' not in d:
+        d['requires'] = []
+
+    return d
+
+def parse_variables(config):
+    if not config.has_section('variables'):
+        raise FormatError("No variables section found!")
+
+    d = {}
+
+    for name, value in config.items("variables"):
+        d[name] = value
+
+    return VariableSet(d)
+
+def parse_sections(config):
+    # Return the names of the "normal" sections, i.e. everything except the
+    # special 'meta' and 'variables' sections handled above. (This helper
+    # previously returned undefined names and could never be called.)
+    return [s for s in config.sections() if s not in ('meta', 'variables')]
+
+def pkg_to_filename(pkg_name):
+    return "%s.ini" % pkg_name
+
+def parse_config(filename, dirs=None):
+    if dirs:
+        filenames = [os.path.join(d, filename) for d in dirs]
+    else:
+        filenames = [filename]
+
+    config = RawConfigParser()
+
+    n = config.read(filenames)
+    if not n:
+        raise PkgNotFound("Could not find file(s) %s" % str(filenames))
+
+    # Parse meta and variables sections
+    meta = parse_meta(config)
+
+    vars = {}
+    if config.has_section('variables'):
+        for name, value in config.items("variables"):
+            vars[name] = _escape_backslash(value)
+
+    # Parse "normal" sections
+    secs = parse_sections(config)
+    sections = {}
+
+    requires = {}
+    for s in secs:
+        d = {}
+        if config.has_option(s, "requires"):
+            requires[s] = config.get(s, 'requires')
+
+        for name, value in config.items(s):
+            d[name] = value
+        sections[s] = d
+
+    return meta, vars, sections, requires
+
+def _read_config_imp(filenames, dirs=None):
+    def _read_config(f):
+        meta, vars, sections, reqs = parse_config(f, dirs)
+        # recursively add sections and variables of required libraries
+        for rname, rvalue in reqs.items():
+            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
+
+            # Update var dict for variables not in 'top' config file
+            for k, v in nvars.items():
+                if k not in vars:
+                    vars[k] = v
+
+            # Update sec dict
+            for oname, ovalue in nsections[rname].items():
+                if ovalue:
+                    sections[rname][oname] += ' %s' % ovalue
+
+        return meta, vars, sections, reqs
+
+    meta, vars, sections, reqs = _read_config(filenames)
+
+    # FIXME: document this.
+    # If pkgname is defined in the variables section and there is no pkgdir
+    # variable defined, pkgdir is automatically set to the path of pkgname.
+    # This requires the package to be importable to work.
+    if 'pkgdir' not in vars and "pkgname" in vars:
+        pkgname = vars["pkgname"]
+        if pkgname not in sys.modules:
+            raise ValueError("You should import %s to get information on %s" %
+                             (pkgname, meta["name"]))
+
+        mod = sys.modules[pkgname]
+        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
+
+    return LibraryInfo(name=meta["name"], description=meta["description"],
+                       version=meta["version"], sections=sections,
+                       vars=VariableSet(vars))
+
+# Trivial cache for LibraryInfo instance creation. To be really efficient,
+# the cache should be handled in read_config, since the same file can be
+# parsed many times outside LibraryInfo creation, but I doubt this will be a
+# problem in practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+    """
+    Return library info for a package from its configuration file.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of directories - usually including
+        the NumPy base directory - where to look for npy-pkg-config files.
+
+    Returns
+    -------
+    pkginfo : class instance
+        The `LibraryInfo` instance containing the build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    misc_util.get_info, misc_util.get_pkg_info
+
+    Examples
+    --------
+    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+    >>> type(npymath_info)
+    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+
+    >>> print(npymath_info)
+    Name: npymath
+    Description: Portable, core math library implementing C99 standard
+    Requires:
+    Version: 0.1  #random
+
+    """
+    try:
+        return _CACHE[pkgname]
+    except KeyError:
+        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+        _CACHE[pkgname] = v
+        return v
+
+# TODO:
+#   - implement version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+    from optparse import OptionParser
+    import glob
+
+    parser = OptionParser()
+    parser.add_option("--cflags", dest="cflags", action="store_true",
+                      help="output all preprocessor and compiler flags")
+    parser.add_option("--libs", dest="libs", action="store_true",
+                      help="output all linker flags")
+    parser.add_option("--use-section", dest="section",
+                      help="use this section instead of default for options")
+    parser.add_option("--version", dest="version", action="store_true",
+                      help="output version")
+    parser.add_option("--atleast-version", dest="min_version",
+                      help="Minimal version")
+    parser.add_option("--list-all", dest="list_all", action="store_true",
+                      help="list all available packages")
+    parser.add_option("--define-variable", dest="define_variable",
+                      help="Replace variable with the given value")
+
+    (options, args) = parser.parse_args(sys.argv)
+
+    if len(args) < 2:
+        raise ValueError("Expect package name on the command line")
+
+    if options.list_all:
+        files = glob.glob("*.ini")
+        for f in files:
+            info = read_config(f)
+            print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+    pkg_name = args[1]
+    d = os.environ.get('NPY_PKG_CONFIG_PATH')
+    if d:
+        info = read_config(
+            pkg_name, ['numpy/_core/lib/npy-pkg-config', '.', d]
+        )
+    else:
+        info = read_config(
+            pkg_name, ['numpy/_core/lib/npy-pkg-config', '.']
+        )
+
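+    # Example invocation (illustrative; assumes an npymath.ini reachable via
+    # one of the search paths above):
+    #
+    #   python npy_pkg_config.py --cflags npymath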
+    if options.section:
+        section = options.section
+    else:
+        section = "default"
+
+    if options.define_variable:
+        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+        if not m:
+            raise ValueError("--define-variable option should be of "
+                             "the form --define-variable=foo=bar")
+        else:
+            name = m.group(1)
+            value = m.group(2)
+        info.vars[name] = value
+
+    if options.cflags:
+        print(info.cflags(section))
+    if options.libs:
+        print(info.libs(section))
+    if options.version:
+        print(info.version)
+    if options.min_version:
+        print(info.version >= options.min_version)
diff --git a/phivenv/Lib/site-packages/numpy/distutils/numpy_distribution.py b/phivenv/Lib/site-packages/numpy/distutils/numpy_distribution.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfd8895ca80fc241b4589bf3cb1a3cd0db7409d
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,17 @@
+# XXX: Handle setuptools?
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+    def __init__(self, attrs=None):
+        # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+        self.scons_data = []
+        # A list of installable libraries
+        self.installed_libraries = []
+        # A dict of pkg_config files to generate/install
+        self.installed_pkg_config = {}
+        Distribution.__init__(self, attrs)
+
+    def has_scons_scripts(self):
+        return bool(self.scons_data)
diff --git a/phivenv/Lib/site-packages/numpy/distutils/pathccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/pathccompiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4d6ce9246e19185ed7917b196ae1b61be96ca81
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/distutils/pathccompiler.py
@@ -0,0 +1,21 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+    """
+    PathScale compiler compatible with a gcc-built Python.
+    """
+
+    compiler_type = 'pathcc'
+    cc_exe = 'pathcc'
+    cxx_exe = 'pathCC'
+
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__(self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler,
+                             compiler_so=cc_compiler,
+                             compiler_cxx=cxx_compiler,
+                             linker_exe=cc_compiler,
+                             linker_so=cc_compiler + ' -shared')
diff --git a/phivenv/Lib/site-packages/numpy/distutils/system_info.py b/phivenv/Lib/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..35aca2495c5f192a7219c895a0db99b4aa421adf
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,3268 @@
+#!/usr/bin/env python3
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Usage:
+    info_dict = get_info(<name>)
+  where <name> is a string 'atlas', 'x11', 'fftw', 'lapack', 'blas',
+  'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+  see the definition of the get_info() function below.
+
+  The returned info_dict is a dictionary which is compatible with
+  distutils.setup keyword arguments. If info_dict == {}, then the
+  requested resource is not available (system_info could not find it).
+
+  Several *_info classes specify an environment variable to specify
+  the locations of software.
+  When the corresponding environment variable is set to 'None', the
+  software will be ignored, even when it is available in the system.
+
+Global parameters:
+  system_info.search_static_first - search static libraries (.a)
+        in preference to shared ones (.so, .sl) if enabled.
+  system_info.verbosity - output the results to stdout if enabled.
+
+The file 'site.cfg' is looked for in
+
+1) Directory of main setup.py file being run.
+2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
+3) System wide directory (location of this file...)
+
+The first one found is used to get system configuration options. The
+format is that used by ConfigParser (i.e., Windows .INI style). The
+section ALL is not intended for general use.
+
+Appropriate defaults are used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. DEFAULT section in site.cfg
+ 4. System default search paths (see ``default_*`` variables below).
+Only the first complete match is returned.
+
+Currently, the following classes are available, along with their section names:
+
+    Numeric_info:Numeric
+    _numpy_info:Numeric
+    _pkg_config_info:None
+    accelerate_info:accelerate
+    accelerate_lapack_info:accelerate
+    agg2_info:agg2
+    amd_info:amd
+    atlas_3_10_blas_info:atlas
+    atlas_3_10_blas_threads_info:atlas
+    atlas_3_10_info:atlas
+    atlas_3_10_threads_info:atlas
+    atlas_blas_info:atlas
+    atlas_blas_threads_info:atlas
+    atlas_info:atlas
+    atlas_threads_info:atlas
+    blas64__opt_info:ALL             # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
+    blas_ilp64_opt_info:ALL          # usage recommended (general ILP64 BLAS)
+    blas_ilp64_plain_opt_info:ALL    # usage recommended (general ILP64 BLAS, no symbol suffix)
+    blas_info:blas
+    blas_mkl_info:mkl
+    blas_ssl2_info:ssl2
+    blas_opt_info:ALL                # usage recommended
+    blas_src_info:blas_src
+    blis_info:blis
+    boost_python_info:boost_python
+    dfftw_info:fftw
+    dfftw_threads_info:fftw
+    djbfft_info:djbfft
+    f2py_info:ALL
+    fft_opt_info:ALL
+    fftw2_info:fftw
+    fftw3_info:fftw3
+    fftw_info:fftw
+    fftw_threads_info:fftw
+    flame_info:flame
+    freetype2_info:freetype2
+    gdk_2_info:gdk_2
+    gdk_info:gdk
+    gdk_pixbuf_2_info:gdk_pixbuf_2
+    gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2
+    gdk_x11_2_info:gdk_x11_2
+    gtkp_2_info:gtkp_2
+    gtkp_x11_2_info:gtkp_x11_2
+    lapack64__opt_info:ALL           # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
+    lapack_atlas_3_10_info:atlas
+    lapack_atlas_3_10_threads_info:atlas
+    lapack_atlas_info:atlas
+    lapack_atlas_threads_info:atlas
+    lapack_ilp64_opt_info:ALL        # usage recommended (general ILP64 LAPACK)
+    lapack_ilp64_plain_opt_info:ALL  # usage recommended (general ILP64 LAPACK, no symbol suffix)
+    lapack_info:lapack
+    lapack_mkl_info:mkl
+    lapack_ssl2_info:ssl2
+    lapack_opt_info:ALL              # usage recommended
+    lapack_src_info:lapack_src
+    mkl_info:mkl
+    ssl2_info:ssl2
+    numarray_info:numarray
+    numerix_info:numerix
+    numpy_info:numpy
+    openblas64__info:openblas64_
+    openblas64__lapack_info:openblas64_
+    openblas_clapack_info:openblas
+    openblas_ilp64_info:openblas_ilp64
+    openblas_ilp64_lapack_info:openblas_ilp64
+    openblas_info:openblas
+    openblas_lapack_info:openblas
+    sfftw_info:fftw
+    sfftw_threads_info:fftw
+    system_info:ALL
+    umfpack_info:umfpack
+    wx_info:wx
+    x11_info:x11
+    xft_info:xft
+
+Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER
+and NPY_LAPACK_ORDER environment variables to determine the order in which
+specific BLAS and LAPACK libraries are searched for.
+
+This search (or autodetection) can be bypassed by defining the environment
+variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the
+exact linker flags to use (language will be set to F77). This is useful,
+for instance, for building against Netlib BLAS/LAPACK or stub files, in
+order to be able to switch BLAS and LAPACK implementations at runtime. If
+using this to build NumPy itself, it is recommended to also define
+NPY_CBLAS_LIBS (assuming your BLAS library has a CBLAS interface) to enable
+CBLAS usage for matrix multiplication (unoptimized otherwise).
+
+Example:
+----------
+[DEFAULT]
+# default section
+library_dirs = /usr/lib:/usr/local/lib:/opt/lib
+include_dirs = /usr/include:/usr/local/include:/opt/include
+src_dirs = /usr/local/src:/opt/src
+# search static libraries (.a) in preference to shared ones (.so)
+search_static_first = 0
+
+[fftw]
+libraries = rfftw, fftw
+
+[atlas]
+library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
+# for overriding the names of the atlas libraries
+libraries = lapack, f77blas, cblas, atlas
+
+[x11]
+library_dirs = /usr/X11R6/lib
+include_dirs = /usr/X11R6/include
+----------
+
+Note that the ``libraries`` key is the default setting for libraries.
+
+Authors:
+  Pearu Peterson, February 2002
+  David M. Cooke, April 2002
+
+Copyright 2002 Pearu Peterson, all rights reserved.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+"""
+import sys
+import os
+import re
+import copy
+import warnings
+import subprocess
+import textwrap
+
+from glob import glob
+from functools import reduce
+from configparser import NoOptionError
+from configparser import RawConfigParser as ConfigParser
+# It seems that some people are importing ConfigParser from here, so it is
+# good to keep its class name. Use of RawConfigParser is needed in
+# order to be able to load path names with percent in them, like
+# `feature%2Fcool` which is common on git flow branch names.
+
+from distutils.errors import DistutilsError
+from distutils.dist import Distribution
+import sysconfig
+from numpy.distutils import log
+from distutils.util import get_platform
+
+from numpy.distutils.exec_command import (
+    find_executable, filepath_from_subprocess_output,
+    )
+from numpy.distutils.misc_util import (is_sequence, is_string,
+                                       get_shared_lib_extension)
+from numpy.distutils.command.config import config as cmd_config
+from numpy.distutils import customized_ccompiler as _customized_ccompiler
+from numpy.distutils import _shell_utils
+import distutils.ccompiler
+import tempfile
+import shutil
+
+__all__ = ['system_info']
+
+# Determine number of bits
+import platform
+_bits = {'32bit': 32, '64bit': 64}
+platform_bits = _bits[platform.architecture()[0]]
+
+
+global_compiler = None
+
+def customized_ccompiler():
+    global global_compiler
+    if not global_compiler:
+        global_compiler = _customized_ccompiler()
+    return global_compiler
+
+
+def _c_string_literal(s):
+    """
+    Convert a python string into a literal suitable for inclusion into C code
+    """
+    # only these three characters are forbidden in C strings
+    s = s.replace('\\', r'\\')
+    s = s.replace('"',  r'\"')
+    s = s.replace('\n', r'\n')
+    return '"{}"'.format(s)
+
+
+def libpaths(paths, bits):
+    """Return a list of library paths valid on 32 or 64 bit systems.
+ + Inputs: + paths : sequence + A sequence of strings (typically paths) + bits : int + An integer, the only valid values are 32 or 64. A ValueError exception + is raised otherwise. + + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> np.distutils.system_info.libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> np.distutils.system_info.libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits == 32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p + '64', p]) + + return out + + +if sys.platform == 'win32': + default_lib_dirs = ['C:\\', + os.path.join(sysconfig.get_config_var('exec_prefix'), + 'libs')] + default_runtime_dirs = [] + default_include_dirs = [] + default_src_dirs = ['.'] + default_x11_lib_dirs = [] + default_x11_include_dirs = [] + _include_dirs = [ + 'include', + 'include/suitesparse', + ] + _lib_dirs = [ + 'lib', + ] + + _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] + _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] + def add_system_root(library_root): + """Add a package manager root to the include directories""" + global default_lib_dirs + global default_include_dirs + + library_root = os.path.normpath(library_root) + + default_lib_dirs.extend( + os.path.join(library_root, d) for d in _lib_dirs) + default_include_dirs.extend( + os.path.join(library_root, d) for d in _include_dirs) + + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. 
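+    # For example (illustrative layout): a vcpkg found at C:\vcpkg\vcpkg.exe
+    # would contribute C:\vcpkg\installed\x64-windows\lib to default_lib_dirs
+    # and C:\vcpkg\installed\x64-windows\include to default_include_dirs via
+    # add_system_root below.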
+ vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture()[0] == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) + +else: + default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', + '/opt/local/lib', '/sw/lib'], platform_bits) + default_runtime_dirs = [] + default_include_dirs = ['/usr/local/include', + '/opt/include', + # path of umfpack under macports + '/opt/local/include/ufsparse', + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] + default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] + + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', + '/usr/lib'], platform_bits) + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] + + if os.path.exists('/usr/lib/X11'): + globbed_x11_dir = glob('/usr/lib/*/libX11.so') + if globbed_x11_dir: + x11_so_dir = os.path.split(globbed_x11_dir[0])[0] + default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) + default_x11_include_dirs.extend(['/usr/lib/X11/include', + '/usr/include/X11']) + + with open(os.devnull, 'w') as tmp: + try: + p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, + stderr=tmp) + except (OSError, DistutilsError): + # OSError if gcc is not installed, or SandboxViolation (DistutilsError + # subclass) if an old setuptools bug is triggered (see gh-3160). 
+ pass + else: + triplet = str(p.communicate()[0].decode().strip()) + if p.returncode == 0: + # gcc supports the "-print-multiarch" option + default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] + default_lib_dirs += [os.path.join("/usr/lib/", triplet)] + + +if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: + default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) + default_include_dirs.append(os.path.join(sys.prefix, 'include')) + default_src_dirs.append(os.path.join(sys.prefix, 'src')) + +default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] +default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] +default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] +default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] + +so_ext = get_shared_lib_extension() + + +def get_standard_file(fname): + """Returns a list of files named 'fname' from + 1) System-wide directory (directory-location of this module) + 2) Users HOME directory (os.environ['HOME']) + 3) Local directory + """ + # System-wide file + filenames = [] + try: + f = __file__ + except NameError: + f = sys.argv[0] + sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], + fname) + if os.path.isfile(sysfile): + filenames.append(sysfile) + + # Home directory + # And look for the user config file + try: + f = os.path.expanduser('~') + except KeyError: + pass + else: + user_file = os.path.join(f, fname) + if os.path.isfile(user_file): + filenames.append(user_file) + + # Local file + if os.path.isfile(fname): + filenames.append(os.path.abspath(fname)) + + return filenames + + +def _parse_env_order(base_order, env): + """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` + + This method will sequence the environment variable and check for their + individual elements in `base_order`. + + The items in the environment variable may be negated via '^item' or '!itema,itemb'. + It must start with ^/! to negate all options. 
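+
+    For example (illustrative): with ``base_order=['mkl', 'openblas', 'blas']``
+    and the variable set to ``'^mkl'``, the allowed order comes back as
+    ``['openblas', 'blas']``; set to ``'openblas,blas'``, it also comes back
+    as ``['openblas', 'blas']``, with any unknown entries reported in the
+    second return value.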
+
+    Raises
+    ------
+    ValueError : for mixed negated and non-negated orders or multiple negated orders
+
+    Parameters
+    ----------
+    base_order : list of str
+       the base list of orders
+    env : str
+       the environment variable to be parsed; if it is not set, `base_order`
+       is returned
+
+    Returns
+    -------
+    allow_order : list of str
+        allowed orders in lower-case
+    unknown_order : list of str
+        for values not overlapping with `base_order`
+    """
+    order_str = os.environ.get(env, None)
+
+    # ensure all base-orders are lower-case (for easier comparison)
+    base_order = [order.lower() for order in base_order]
+    if order_str is None:
+        return base_order, []
+
+    neg = order_str.startswith('^') or order_str.startswith('!')
+    # Check format
+    order_str_l = list(order_str)
+    sum_neg = order_str_l.count('^') + order_str_l.count('!')
+    if neg:
+        if sum_neg > 1:
+            raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+        # remove prefix
+        order_str = order_str[1:]
+    elif sum_neg > 0:
+        raise ValueError(f"Environment variable '{env}' may not mix negated and non-negated items: {order_str}")
+
+    # Split and lower case
+    orders = order_str.lower().split(',')
+
+    # to inform the caller about non-overlapping elements
+    unknown_order = []
+
+    # if negated, we have to remove from the order
+    if neg:
+        allow_order = base_order.copy()
+
+        for order in orders:
+            if not order:
+                continue
+
+            if order not in base_order:
+                unknown_order.append(order)
+                continue
+
+            if order in allow_order:
+                allow_order.remove(order)
+
+    else:
+        allow_order = []
+
+        for order in orders:
+            if not order:
+                continue
+
+            if order not in base_order:
+                unknown_order.append(order)
+                continue
+
+            if order not in allow_order:
+                allow_order.append(order)
+
+    return allow_order, unknown_order
+
+
+def get_info(name, notfound_action=0):
+    """
+    notfound_action:
+      0 - do nothing
+      1 - display warning message
+      2 - raise error
+    """
+    cl = {'armpl': armpl_info,
+          'blas_armpl': blas_armpl_info,
+          'lapack_armpl': lapack_armpl_info,
+          'fftw3_armpl': fftw3_armpl_info,
+          'atlas': atlas_info,  # use lapack_opt or blas_opt instead
+          'atlas_threads': atlas_threads_info,  # ditto
+          'atlas_blas': atlas_blas_info,
+          'atlas_blas_threads': atlas_blas_threads_info,
+          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
+          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
+          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
+          'atlas_3_10_threads': atlas_3_10_threads_info,  # ditto
+          'atlas_3_10_blas': atlas_3_10_blas_info,
+          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
+          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
+          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
+          'flame': flame_info,  # use lapack_opt instead
+          'mkl': mkl_info,
+          'ssl2': ssl2_info,
+          # openblas which may or may not have embedded lapack
+          'openblas': openblas_info,  # use blas_opt instead
+          # openblas with embedded lapack
+          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
+          'openblas_clapack': openblas_clapack_info,  # use blas_opt instead
+          'blis': blis_info,  # use blas_opt instead
+          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
+          'blas_mkl': blas_mkl_info,  # use blas_opt instead
+          'lapack_ssl2': lapack_ssl2_info,
+          'blas_ssl2': blas_ssl2_info,
+          'accelerate': accelerate_info,  # use blas_opt instead
+          'accelerate_lapack': accelerate_lapack_info,
+          'openblas64_': openblas64__info,
+          'openblas64__lapack': openblas64__lapack_info,
'openblas_ilp64': openblas_ilp64_info, + 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, + 'x11': x11_info, + 'fft_opt': fft_opt_info, + 'fftw': fftw_info, + 'fftw2': fftw2_info, + 'fftw3': fftw3_info, + 'dfftw': dfftw_info, + 'sfftw': sfftw_info, + 'fftw_threads': fftw_threads_info, + 'dfftw_threads': dfftw_threads_info, + 'sfftw_threads': sfftw_threads_info, + 'djbfft': djbfft_info, + 'blas': blas_info, # use blas_opt instead + 'lapack': lapack_info, # use lapack_opt instead + 'lapack_src': lapack_src_info, + 'blas_src': blas_src_info, + 'numpy': numpy_info, + 'f2py': f2py_info, + 'Numeric': Numeric_info, + 'numeric': Numeric_info, + 'numarray': numarray_info, + 'numerix': numerix_info, + 'lapack_opt': lapack_opt_info, + 'lapack_ilp64_opt': lapack_ilp64_opt_info, + 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, + 'lapack64__opt': lapack64__opt_info, + 'blas_opt': blas_opt_info, + 'blas_ilp64_opt': blas_ilp64_opt_info, + 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, + 'blas64__opt': blas64__opt_info, + 'boost_python': boost_python_info, + 'agg2': agg2_info, + 'wx': wx_info, + 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, + 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, + 'gdk_pixbuf_2': gdk_pixbuf_2_info, + 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, + 'gdk': gdk_info, + 'gdk_2': gdk_2_info, + 'gdk-2.0': gdk_2_info, + 'gdk_x11_2': gdk_x11_2_info, + 'gdk-x11-2.0': gdk_x11_2_info, + 'gtkp_x11_2': gtkp_x11_2_info, + 'gtk+-x11-2.0': gtkp_x11_2_info, + 'gtkp_2': gtkp_2_info, + 'gtk+-2.0': gtkp_2_info, + 'xft': xft_info, + 'freetype2': freetype2_info, + 'umfpack': umfpack_info, + 'amd': amd_info, + }.get(name.lower(), system_info) + return cl().get_info(notfound_action) + + +class NotFoundError(DistutilsError): + """Some third-party program or library is not found.""" + + +class AliasedOptionError(DistutilsError): + """ + Aliases entries in config files should not be existing. + In section '{section}' we found multiple appearances of options {options}.""" + + +class AtlasNotFoundError(NotFoundError): + """ + Atlas (http://github.com/math-atlas/math-atlas) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [atlas]) or by setting + the ATLAS environment variable.""" + + +class FlameNotFoundError(NotFoundError): + """ + FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [flame]).""" + + +class LapackNotFoundError(NotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [lapack]) or by setting + the LAPACK environment variable.""" + + +class LapackSrcNotFoundError(LapackNotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [lapack_src]) or by setting + the LAPACK_SRC environment variable.""" + + +class LapackILP64NotFoundError(NotFoundError): + """ + 64-bit Lapack libraries not found. + Known libraries in numpy/distutils/site.cfg file are: + openblas64_, openblas_ilp64 + """ + +class BlasOptNotFoundError(NotFoundError): + """ + Optimized (vendor) Blas libraries are not found. + Falls back to netlib Blas library which has worse performance. 
Better performance can usually be gained simply by switching to an
+    optimized BLAS library."""
+
+class BlasNotFoundError(NotFoundError):
+    """
+    Blas (http://www.netlib.org/blas/) libraries not found.
+    Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [blas]) or by setting
+    the BLAS environment variable."""
+
+class BlasILP64NotFoundError(NotFoundError):
+    """
+    64-bit Blas libraries not found.
+    Known libraries in numpy/distutils/site.cfg file are:
+    openblas64_, openblas_ilp64
+    """
+
+class BlasSrcNotFoundError(BlasNotFoundError):
+    """
+    Blas (http://www.netlib.org/blas/) sources not found.
+    Directories to search for the sources can be specified in the
+    numpy/distutils/site.cfg file (section [blas_src]) or by setting
+    the BLAS_SRC environment variable."""
+
+
+class FFTWNotFoundError(NotFoundError):
+    """
+    FFTW (http://www.fftw.org/) libraries not found.
+    Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [fftw]) or by setting
+    the FFTW environment variable."""
+
+
+class DJBFFTNotFoundError(NotFoundError):
+    """
+    DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.
+    Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [djbfft]) or by setting
+    the DJBFFT environment variable."""
+
+
+class NumericNotFoundError(NotFoundError):
+    """
+    Numeric (https://www.numpy.org/) module not found.
+    Get it from the above location, install it, and retry setup.py."""
+
+
+class X11NotFoundError(NotFoundError):
+    """X11 libraries not found."""
+
+
+class UmfpackNotFoundError(NotFoundError):
+    """
+    UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)
+    not found. Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [umfpack]) or by setting
+    the UMFPACK environment variable."""
+
+
+class system_info:
+
+    """ get_info() is the only public method. Don't use others.
+    """
+    dir_env_var = None
+    # XXX: search_static_first is disabled by default, may disappear in
+    # future unless it is proved to be useful.
+    search_static_first = 0
+    # The base-class section name is a random word "ALL" and is not really
+    # intended for general use. It cannot be None nor can it be DEFAULT as
+    # these break the ConfigParser.
See gh-15338 + section = 'ALL' + saved_results = {} + + notfounderror = NotFoundError + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), + 'include_dirs': os.pathsep.join(default_include_dirs), + 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), + 'rpath': '', + 'src_dirs': os.pathsep.join(default_src_dirs), + 'search_static_first': str(self.search_static_first), + 'extra_compile_args': '', 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + self.files = [] + self.files.extend(get_standard_file('.numpy-site.cfg')) + self.files.extend(get_standard_file('site.cfg')) + self.parse_config_files() + + if self.section is not None: + self.search_static_first = self.cp.getboolean( + self.section, 'search_static_first') + assert isinstance(self.search_static_first, int) + + def parse_config_files(self): + self.cp.read(self.files) + if not self.cp.has_section(self.section): + if self.section is not None: + self.cp.add_section(self.section) + + def calc_libraries_info(self): + libs = self.get_libraries() + dirs = self.get_lib_dirs() + # The extensions use runtime_library_dirs + r_dirs = self.get_runtime_lib_dirs() + # Intrinsic distutils use rpath, we simply append both entries + # as though they were one entry + r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) + info = {} + for lib in libs: + i = self.check_libs(dirs, [lib]) + if i is not None: + dict_append(info, **i) + else: + log.info('Library %s was not found. Ignoring' % (lib)) + + if r_dirs: + i = self.check_libs(r_dirs, [lib]) + if i is not None: + # Swap library keywords found to runtime_library_dirs + # the libraries are insisting on the user having defined + # them using the library_dirs, and not necessarily by + # runtime_library_dirs + del i['libraries'] + i['runtime_library_dirs'] = i.pop('library_dirs') + dict_append(info, **i) + else: + log.info('Runtime library %s was not found. 
Ignoring' % (lib)) + + return info + + def set_info(self, **info): + if info: + lib_info = self.calc_libraries_info() + dict_append(info, **lib_info) + # Update extra information + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + self.saved_results[self.__class__.__name__] = info + + def get_option_single(self, *options): + """ Ensure that only one of `options` are found in the section + + Parameters + ---------- + *options : list of str + a list of options to be found in the section (``self.section``) + + Returns + ------- + str : + the option that is uniquely found in the section + + Raises + ------ + AliasedOptionError : + in case more than one of the options are found + """ + found = [self.cp.has_option(self.section, opt) for opt in options] + if sum(found) == 1: + return options[found.index(True)] + elif sum(found) == 0: + # nothing is found anyways + return options[0] + + # Else we have more than 1 key found + if AliasedOptionError.__doc__ is None: + raise AliasedOptionError() + raise AliasedOptionError(AliasedOptionError.__doc__.format( + section=self.section, options='[{}]'.format(', '.join(options)))) + + + def has_info(self): + return self.__class__.__name__ in self.saved_results + + def calc_extra_info(self): + """ Updates the information in the current information with + respect to these flags: + extra_compile_args + extra_link_args + """ + info = {} + for key in ['extra_compile_args', 'extra_link_args']: + # Get values + opt = self.cp.get(self.section, key) + opt = _shell_utils.NativeParser.split(opt) + if opt: + tmp = {key: opt} + dict_append(info, **tmp) + return info + + def get_info(self, notfound_action=0): + """ Return a dictionary with items that are compatible + with numpy.distutils.setup keyword arguments. + """ + flag = 0 + if not self.has_info(): + flag = 1 + log.info(self.__class__.__name__ + ':') + if hasattr(self, 'calc_info'): + self.calc_info() + if notfound_action: + if not self.has_info(): + if notfound_action == 1: + warnings.warn(self.notfounderror.__doc__, stacklevel=2) + elif notfound_action == 2: + raise self.notfounderror(self.notfounderror.__doc__) + else: + raise ValueError(repr(notfound_action)) + + if not self.has_info(): + log.info(' NOT AVAILABLE') + self.set_info() + else: + log.info(' FOUND:') + + res = self.saved_results.get(self.__class__.__name__) + if log.get_threshold() <= log.INFO and flag: + for k, v in res.items(): + v = str(v) + if k in ['sources', 'libraries'] and len(v) > 270: + v = v[:120] + '...\n...\n...' 
+ v[-120:] + log.info(' %s = %s', k, v) + log.info('') + + return copy.deepcopy(res) + + def get_paths(self, section, key): + dirs = self.cp.get(section, key).split(os.pathsep) + env_var = self.dir_env_var + if env_var: + if is_sequence(env_var): + e0 = env_var[-1] + for e in env_var: + if e in os.environ: + e0 = e + break + if not env_var[0] == e0: + log.info('Setting %s=%s' % (env_var[0], e0)) + env_var = e0 + if env_var and env_var in os.environ: + d = os.environ[env_var] + if d == 'None': + log.info('Disabled %s: %s', + self.__class__.__name__, '(%s is None)' + % (env_var,)) + return [] + if os.path.isfile(d): + dirs = [os.path.dirname(d)] + dirs + l = getattr(self, '_lib_names', []) + if len(l) == 1: + b = os.path.basename(d) + b = os.path.splitext(b)[0] + if b[:3] == 'lib': + log.info('Replacing _lib_names[0]==%r with %r' \ + % (self._lib_names[0], b[3:])) + self._lib_names[0] = b[3:] + else: + ds = d.split(os.pathsep) + ds2 = [] + for d in ds: + if os.path.isdir(d): + ds2.append(d) + for dd in ['include', 'lib']: + d1 = os.path.join(d, dd) + if os.path.isdir(d1): + ds2.append(d1) + dirs = ds2 + dirs + default_dirs = self.cp.get(self.section, key).split(os.pathsep) + dirs.extend(default_dirs) + ret = [] + for d in dirs: + if len(d) > 0 and not os.path.isdir(d): + warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) + continue + + if d not in ret: + ret.append(d) + + log.debug('( %s = %s )', key, ':'.join(ret)) + return ret + + def get_lib_dirs(self, key='library_dirs'): + return self.get_paths(self.section, key) + + def get_runtime_lib_dirs(self, key='runtime_library_dirs'): + path = self.get_paths(self.section, key) + if path == ['']: + path = [] + return path + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) + + def get_src_dirs(self, key='src_dirs'): + return self.get_paths(self.section, key) + + def get_libs(self, key, default): + try: + libs = self.cp.get(self.section, key) + except NoOptionError: + if not default: + return [] + if is_string(default): + return [default] + return default + return [b for b in [a.strip() for a in libs.split(',')] if b] + + def get_libraries(self, key='libraries'): + if hasattr(self, '_lib_names'): + return self.get_libs(key, default=self._lib_names) + else: + return self.get_libs(key, '') + + def library_extensions(self): + c = customized_ccompiler() + static_exts = [] + if c.compiler_type != 'msvc': + # MSVC doesn't understand binutils + static_exts.append('.a') + if sys.platform == 'win32': + static_exts.append('.lib') # .lib is used by MSVC and others + if self.search_static_first: + exts = static_exts + [so_ext] + else: + exts = [so_ext] + static_exts + if sys.platform == 'cygwin': + exts.append('.dll.a') + if sys.platform == 'darwin': + exts.append('.dylib') + return exts + + def check_libs(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks for all libraries as shared libraries first, then + static (or vice versa if self.search_static_first is True). + """ + exts = self.library_extensions() + info = None + for ext in exts: + info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) + if info is not None: + break + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def check_libs2(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks each library for shared or static. 
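+
+        For example (illustrative, on Linux with search_static_first=0):
+        check_libs accepts a set of libraries only when all of them match a
+        single extension (all '.so', otherwise all '.a'), whereas check_libs2
+        lets each library match whichever of libfoo.so / libfoo.a exists.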
+ """ + exts = self.library_extensions() + info = self._check_libs(lib_dirs, libs, opt_libs, exts) + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + + return info + + def _find_lib(self, lib_dir, lib, exts): + assert is_string(lib_dir) + # under windows first try without 'lib' prefix + if sys.platform == 'win32': + lib_prefixes = ['', 'lib'] + else: + lib_prefixes = ['lib'] + # for each library name, see if we can find a file for it. + for ext in exts: + for prefix in lib_prefixes: + p = self.combine_paths(lib_dir, prefix + lib + ext) + if p: + break + if p: + assert len(p) == 1 + # ??? splitext on p[0] would do this for cygwin + # doesn't seem correct + if ext == '.dll.a': + lib += '.dll' + if ext == '.lib': + lib = prefix + lib + return lib + + return False + + def _find_libs(self, lib_dirs, libs, exts): + # make sure we preserve the order of libs, as it can be important + found_dirs, found_libs = [], [] + for lib in libs: + for lib_dir in lib_dirs: + found_lib = self._find_lib(lib_dir, lib, exts) + if found_lib: + found_libs.append(found_lib) + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + break + return found_dirs, found_libs + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Find mandatory and optional libs in expected paths. + + Missing optional libraries are silently forgotten. + """ + if not is_sequence(lib_dirs): + lib_dirs = [lib_dirs] + # First, try to find the mandatory libraries + found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) + if len(found_libs) > 0 and len(found_libs) == len(libs): + # Now, check for optional libraries + opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) + found_libs.extend(opt_found_libs) + for lib_dir in opt_found_dirs: + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + info = {'libraries': found_libs, 'library_dirs': found_dirs} + return info + else: + return None + + def combine_paths(self, *args): + """Return a list of existing paths composed by all combinations + of items from the arguments. 
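+
+        For example (illustrative)::
+
+            self.combine_paths('/usr/lib', ['atlas*', 'sse2'])
+
+        expands the glob patterns against each joined path and returns only
+        the combinations that actually exist on disk.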
+ """ + return combine_paths(*args) + + +class fft_opt_info(system_info): + + def calc_info(self): + info = {} + fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') + djbfft_info = get_info('djbfft') + if fftw_info: + dict_append(info, **fftw_info) + if djbfft_info: + dict_append(info, **djbfft_info) + self.set_info(**info) + return + + +class fftw_info(system_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + {'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]}] + + def calc_ver_info(self, ver_param): + """Returns True on successful version detection, else False""" + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + + opt = self.get_option_single(self.section + '_libs', 'libraries') + libs = self.get_libs(opt, ver_param['libs']) + info = self.check_libs(lib_dirs, libs) + if info is not None: + flag = 0 + for d in incl_dirs: + if len(self.combine_paths(d, ver_param['includes'])) \ + == len(ver_param['includes']): + dict_append(info, include_dirs=[d]) + flag = 1 + break + if flag: + dict_append(info, define_macros=ver_param['macros']) + else: + info = None + if info is not None: + self.set_info(**info) + return True + else: + log.info(' %s not found' % (ver_param['name'])) + return False + + def calc_info(self): + for i in self.ver_info: + if self.calc_ver_info(i): + break + + +class fftw2_info(fftw_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]} + ] + + +class fftw3_info(fftw_info): + #variables to override + section = 'fftw3' + dir_env_var = 'FFTW3' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + ] + + +class fftw3_armpl_info(fftw_info): + section = 'fftw3' + dir_env_var = 'ARMPL_DIR' + notfounderror = FFTWNotFoundError + ver_info = [{'name': 'fftw3', + 'libs': ['armpl_lp64_mp'], + 'includes': ['fftw3.h'], + 'macros': [('SCIPY_FFTW3_H', None)]}] + + +class dfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw', + 'libs':['drfftw', 'dfftw'], + 'includes':['dfftw.h', 'drfftw.h'], + 'macros':[('SCIPY_DFFTW_H', None)]}] + + +class sfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw', + 'libs':['srfftw', 'sfftw'], + 'includes':['sfftw.h', 'srfftw.h'], + 'macros':[('SCIPY_SFFTW_H', None)]}] + + +class fftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'fftw threads', + 'libs':['rfftw_threads', 'fftw_threads'], + 'includes':['fftw_threads.h', 'rfftw_threads.h'], + 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] + + +class dfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw threads', + 'libs':['drfftw_threads', 'dfftw_threads'], + 'includes':['dfftw_threads.h', 'drfftw_threads.h'], + 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] + + +class sfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw threads', + 'libs':['srfftw_threads', 'sfftw_threads'], + 'includes':['sfftw_threads.h', 'srfftw_threads.h'], + 
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] + + +class djbfft_info(system_info): + section = 'djbfft' + dir_env_var = 'DJBFFT' + notfounderror = DJBFFTNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + info = None + for d in lib_dirs: + p = self.combine_paths(d, ['djbfft.a']) + if p: + info = {'extra_objects': p} + break + p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) + if p: + info = {'libraries': ['djbfft'], 'library_dirs': [d]} + break + if info is None: + return + for d in incl_dirs: + if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: + dict_append(info, include_dirs=[d], + define_macros=[('SCIPY_DJBFFT_H', None)]) + self.set_info(**info) + return + return + + +class mkl_info(system_info): + section = 'mkl' + dir_env_var = 'MKLROOT' + _lib_mkl = ['mkl_rt'] + + def get_mkl_rootdir(self): + mklroot = os.environ.get('MKLROOT', None) + if mklroot is not None: + return mklroot + paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) + ld_so_conf = '/etc/ld.so.conf' + if os.path.isfile(ld_so_conf): + with open(ld_so_conf) as f: + for d in f: + d = d.strip() + if d: + paths.append(d) + intel_mkl_dirs = [] + for path in paths: + path_atoms = path.split(os.sep) + for m in path_atoms: + if m.startswith('mkl'): + d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) + intel_mkl_dirs.append(d) + break + for d in paths: + dirs = glob(os.path.join(d, 'mkl', '*')) + dirs += glob(os.path.join(d, 'mkl*')) + for sub_dir in dirs: + if os.path.isdir(os.path.join(sub_dir, 'lib')): + return sub_dir + return None + + def __init__(self): + mklroot = self.get_mkl_rootdir() + if mklroot is None: + system_info.__init__(self) + else: + from .cpuinfo import cpu + if cpu.is_Itanium(): + plt = '64' + elif cpu.is_Intel() and cpu.is_64bit(): + plt = 'intel64' + else: + plt = '32' + system_info.__init__( + self, + default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], + default_include_dirs=[os.path.join(mklroot, 'include')]) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + opt = self.get_option_single('mkl_libs', 'libraries') + mkl_libs = self.get_libs(opt, self._lib_mkl) + info = self.check_libs2(lib_dirs, mkl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + if sys.platform == 'win32': + pass # win32 has no pthread library + else: + dict_append(info, libraries=['pthread']) + self.set_info(**info) + + +class lapack_mkl_info(mkl_info): + pass + + +class blas_mkl_info(mkl_info): + pass + + +class ssl2_info(system_info): + section = 'ssl2' + dir_env_var = 'SSL2_DIR' + # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
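+    # (To link the single-threaded SSL2 instead, comment out the assignment
+    # below and enable the commented `fjlapacksve` line that follows it.)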
+ _lib_ssl2 = ['fjlapackexsve'] + # Single-threaded version + #_lib_ssl2 = ['fjlapacksve'] + + def get_tcsds_rootdir(self): + tcsdsroot = os.environ.get('TCSDS_PATH', None) + if tcsdsroot is not None: + return tcsdsroot + return None + + def __init__(self): + tcsdsroot = self.get_tcsds_rootdir() + if tcsdsroot is None: + system_info.__init__(self) + else: + system_info.__init__( + self, + default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], + default_include_dirs=[os.path.join(tcsdsroot, + 'clang-comp/include')]) + + def calc_info(self): + tcsdsroot = self.get_tcsds_rootdir() + + lib_dirs = self.get_lib_dirs() + if lib_dirs is None: + lib_dirs = os.path.join(tcsdsroot, 'lib64') + + incl_dirs = self.get_include_dirs() + if incl_dirs is None: + incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') + + ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) + + info = self.check_libs2(lib_dirs, ssl2_libs) + if info is None: + return + dict_append(info, + define_macros=[('HAVE_CBLAS', None), + ('HAVE_SSL2', 1)], + include_dirs=incl_dirs,) + self.set_info(**info) + + +class lapack_ssl2_info(ssl2_info): + pass + + +class blas_ssl2_info(ssl2_info): + pass + + + +class armpl_info(system_info): + section = 'armpl' + dir_env_var = 'ARMPL_DIR' + _lib_armpl = ['armpl_lp64_mp'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) + info = self.check_libs2(lib_dirs, armpl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + +class lapack_armpl_info(armpl_info): + pass + +class blas_armpl_info(armpl_info): + pass + + +class atlas_info(system_info): + section = 'atlas' + dir_env_var = 'ATLAS' + _lib_names = ['f77blas', 'cblas'] + if sys.platform[:7] == 'freebsd': + _lib_atlas = ['atlas_r'] + _lib_lapack = ['alapack_r'] + else: + _lib_atlas = ['atlas'] + _lib_lapack = ['lapack'] + + notfounderror = AtlasNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', + 'sse', '3dnow', 'sse2']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) + atlas = None + lapack = None + atlas_1 = None + for d in lib_dirs: + atlas = self.check_libs2(d, atlas_libs, []) + if atlas is not None: + lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) + lapack = self.check_libs2(lib_dirs2, lapack_libs, []) + if lapack is not None: + break + if atlas: + atlas_1 = atlas + log.info(self.__class__) + if atlas is None: + atlas = atlas_1 + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + if lapack is not None: + dict_append(info, **lapack) + dict_append(info, **atlas) + elif 'lapack_atlas' in atlas['libraries']: + dict_append(info, **atlas) + dict_append(info, + define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) + self.set_info(**info) + return + else: + dict_append(info, **atlas) + dict_append(info, 
define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) + message = textwrap.dedent(""" + ********************************************************************* + Could not find lapack library within the ATLAS installation. + ********************************************************************* + """) + warnings.warn(message, stacklevel=2) + self.set_info(**info) + return + + # Check if lapack library is complete, only warn if it is not. + lapack_dir = lapack['library_dirs'][0] + lapack_name = lapack['libraries'][0] + lapack_lib = None + lib_prefixes = ['lib'] + if sys.platform == 'win32': + lib_prefixes.append('') + for e in self.library_extensions(): + for prefix in lib_prefixes: + fn = os.path.join(lapack_dir, prefix + lapack_name + e) + if os.path.exists(fn): + lapack_lib = fn + break + if lapack_lib: + break + if lapack_lib is not None: + sz = os.stat(lapack_lib)[6] + if sz <= 4000 * 1024: + message = textwrap.dedent(""" + ********************************************************************* + Lapack library (from ATLAS) is probably incomplete: + size of %s is %sk (expected >4000k) + + Follow the instructions in the KNOWN PROBLEMS section of the file + numpy/INSTALL.txt. + ********************************************************************* + """) % (lapack_lib, sz / 1024) + warnings.warn(message, stacklevel=2) + else: + info['language'] = 'f77' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(info, **atlas_extra_info) + + self.set_info(**info) + + +class atlas_blas_info(atlas_info): + _lib_names = ['f77blas', 'cblas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_threads_info(atlas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class atlas_blas_threads_info(atlas_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class lapack_atlas_info(atlas_info): + _lib_names = ['lapack_atlas'] + atlas_info._lib_names + + +class lapack_atlas_threads_info(atlas_threads_info): + _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names + + +class atlas_3_10_info(atlas_info): + _lib_names = ['satlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_info(atlas_3_10_info): + _lib_names = ['satlas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_lib', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + 
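        # fold the detected ATLAS version macros into `atlas` before merging below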
+        dict_append(atlas, **atlas_extra_info)
+
+        dict_append(info, **atlas)
+
+        self.set_info(**info)
+        return
+
+
+class atlas_3_10_threads_info(atlas_3_10_info):
+    dir_env_var = ['PTATLAS', 'ATLAS']
+    _lib_names = ['tatlas']
+    _lib_atlas = _lib_names
+    _lib_lapack = _lib_names
+
+
+class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
+    dir_env_var = ['PTATLAS', 'ATLAS']
+    _lib_names = ['tatlas']
+
+
+class lapack_atlas_3_10_info(atlas_3_10_info):
+    pass
+
+
+class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
+    pass
+
+
+class lapack_info(system_info):
+    section = 'lapack'
+    dir_env_var = 'LAPACK'
+    _lib_names = ['lapack']
+    notfounderror = LapackNotFoundError
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+
+        opt = self.get_option_single('lapack_libs', 'libraries')
+        lapack_libs = self.get_libs(opt, self._lib_names)
+        info = self.check_libs(lib_dirs, lapack_libs, [])
+        if info is None:
+            return
+        info['language'] = 'f77'
+        self.set_info(**info)
+
+
+class lapack_src_info(system_info):
+    # LAPACK_SRC is deprecated, please do not use this!
+    # Build or install a LAPACK library via your package manager or from
+    # source separately.
+    section = 'lapack_src'
+    dir_env_var = 'LAPACK_SRC'
+    notfounderror = LapackSrcNotFoundError
+
+    def get_paths(self, section, key):
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
+        return [d for d in dirs if os.path.isdir(d)]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d, 'dgesv.f')):
+                src_dir = d
+                break
+        if not src_dir:
+            #XXX: Get sources from netlib. Maybe ask first.
+            return
+        # The following is extracted from LAPACK-3.0/SRC/Makefile.
+        # Added missing names from lapack-lite-3.1.1/SRC/Makefile
+        # while keeping removed names for Lapack-3.0 compatibility.
+ allaux = ''' + ilaenv ieeeck lsame lsamen xerbla + iparmq + ''' # *.f + laux = ''' + bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 + laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 + lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre + larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 + lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 + lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf + stebz stedc steqr sterf + + larra larrc larrd larr larrk larrj larrr laneg laisnan isnan + lazq3 lazq4 + ''' # [s|d]*.f + lasrc = ''' + gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak + gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv + gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 + geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd + gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal + gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd + ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein + hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 + lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb + lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp + laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv + lartv larz larzb larzt laswp lasyf latbs latdf latps latrd + latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv + pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 + potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri + pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs + spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv + sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 + tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs + trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs + tzrqf tzrzf + + lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 + ''' # [s|c|d|z]*.f + sd_lasrc = ''' + laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l + org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr + orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 + ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx + sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd + stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd + sygvx sytd2 sytrd + ''' # [s|d]*.f + cz_lasrc = ''' + bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev + heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv + hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd + hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf + hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 + laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe + laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv + spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq + ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 + unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr + ''' # [c|z]*.f + ####### + sclaux = laux + ' econd ' # s*.f + dzlaux = laux + ' secnd ' # d*.f + slasrc = lasrc + sd_lasrc # s*.f + dlasrc = lasrc + sd_lasrc # d*.f + clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f + zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f + oclasrc = ' icmax1 scsum1 ' # *.f + ozlasrc = ' izmax1 dzsum1 ' # *.f + sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + + ['c%s.f' % f for f in (clasrc).split()] \ + + ['z%s.f' % f for f in (zlasrc).split()] \ + + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] + sources = [os.path.join(src_dir, f) for f in sources] + # Lapack 3.1: + src_dir2 = os.path.join(src_dir, '..', 'INSTALL') + sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] + # Lapack 3.2.1: + sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] + # Should we check here actual existence of source files? + # Yes, the file listing is different between 3.0 and 3.1 + # versions. + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + +atlas_version_c_text = r''' +/* This file is generated from numpy/distutils/system_info.py */ +void ATL_buildinfo(void); +int main(void) { + ATL_buildinfo(); + return 0; +} +''' + +_cached_atlas_version = {} + + +def get_atlas_version(**config): + libraries = config.get('libraries', []) + library_dirs = config.get('library_dirs', []) + key = (tuple(libraries), tuple(library_dirs)) + if key in _cached_atlas_version: + return _cached_atlas_version[key] + c = cmd_config(Distribution()) + atlas_version = None + info = {} + try: + s, o = c.get_output(atlas_version_c_text, + libraries=libraries, library_dirs=library_dirs, + ) + if s and re.search(r'undefined reference to `_gfortran', o, re.M): + s, o = c.get_output(atlas_version_c_text, + libraries=libraries + ['gfortran'], + library_dirs=library_dirs, + ) + if not s: + warnings.warn(textwrap.dedent(""" + ***************************************************** + Linkage with ATLAS requires gfortran. Use + + python setup.py config_fc --fcompiler=gnu95 ... + + when building extension libraries that use ATLAS. + Make sure that -lgfortran is used for C++ extensions. 
+                *****************************************************
+                """), stacklevel=2)
+            dict_append(info, language='f90',
+                        define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
+    except Exception:  # failed to get version from file -- maybe on Windows
+        # look at directory name
+        for o in library_dirs:
+            m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
+            if m:
+                atlas_version = m.group('version')
+            if atlas_version is not None:
+                break
+
+        # final choice --- look at ATLAS_VERSION environment
+        #                  variable
+        if atlas_version is None:
+            atlas_version = os.environ.get('ATLAS_VERSION', None)
+        if atlas_version:
+            dict_append(info, define_macros=[(
+                'ATLAS_INFO', _c_string_literal(atlas_version))
+            ])
+        else:
+            dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
+        return atlas_version or '?.?.?', info
+
+    if not s:
+        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
+        if m:
+            atlas_version = m.group('version')
+    if atlas_version is None:
+        if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
+            atlas_version = '3.2.1_pre3.3.6'
+        else:
+            log.info('Status: %d', s)
+            log.info('Output: %s', o)
+
+    elif atlas_version == '3.2.1_pre3.3.6':
+        dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
+    else:
+        dict_append(info, define_macros=[(
+            'ATLAS_INFO', _c_string_literal(atlas_version))
+        ])
+    result = _cached_atlas_version[key] = atlas_version, info
+    return result
+
+
+class lapack_opt_info(system_info):
+    notfounderror = LapackNotFoundError
+
+    # List of all known LAPACK libraries, in the default order
+    lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame',
+                    'accelerate', 'atlas', 'lapack']
+    order_env_var_name = 'NPY_LAPACK_ORDER'
+
+    def _calc_info_armpl(self):
+        info = get_info('lapack_armpl')
+        if info:
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_mkl(self):
+        info = get_info('lapack_mkl')
+        if info:
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_ssl2(self):
+        info = get_info('lapack_ssl2')
+        if info:
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_openblas(self):
+        info = get_info('openblas_lapack')
+        if info:
+            self.set_info(**info)
+            return True
+        info = get_info('openblas_clapack')
+        if info:
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_flame(self):
+        info = get_info('flame')
+        if info:
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_atlas(self):
+        info = get_info('atlas_3_10_threads')
+        if not info:
+            info = get_info('atlas_3_10')
+        if not info:
+            info = get_info('atlas_threads')
+        if not info:
+            info = get_info('atlas')
+        if info:
+            # Figure out if ATLAS has lapack...
+            # If not we need the lapack library, but not BLAS!
+            l = info.get('define_macros', [])
+            if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
+               or ('ATLAS_WITHOUT_LAPACK', None) in l:
+                # Get LAPACK (with possible warnings)
+                # If not found we don't accept anything
+                # since we can't use ATLAS with LAPACK!
+                lapack_info = self._get_info_lapack()
+                if not lapack_info:
+                    return False
+                dict_append(info, **lapack_info)
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_accelerate(self):
+        info = get_info('accelerate')
+        if info:
+            self.set_info(**info)
+            return True
+        return False
+
+    def _get_info_blas(self):
+        # Default to get the optimized BLAS implementation
+        info = get_info('blas_opt')
+        if not info:
+            warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+            info_src = get_info('blas_src')
+            if not info_src:
+                warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+                return {}
+            dict_append(info, libraries=[('fblas_src', info_src)])
+        return info
+
+    def _get_info_lapack(self):
+        info = get_info('lapack')
+        if not info:
+            warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)
+            info_src = get_info('lapack_src')
+            if not info_src:
+                warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)
+                return {}
+            dict_append(info, libraries=[('flapack_src', info_src)])
+        return info
+
+    def _calc_info_lapack(self):
+        info = self._get_info_lapack()
+        if info:
+            info_blas = self._get_info_blas()
+            dict_append(info, **info_blas)
+            dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+            self.set_info(**info)
+            return True
+        return False
+
+    def _calc_info_from_envvar(self):
+        info = {}
+        info['language'] = 'f77'
+        info['libraries'] = []
+        info['include_dirs'] = []
+        info['define_macros'] = []
+        info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split()
+        self.set_info(**info)
+        return True
+
+    def _calc_info(self, name):
+        return getattr(self, '_calc_info_{}'.format(name))()
+
+    def calc_info(self):
+        lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)
+        if len(unknown_order) > 0:
+            raise ValueError("lapack_opt_info user defined "
+                             "LAPACK order has unacceptable "
+                             "values: {}".format(unknown_order))
+
+        if 'NPY_LAPACK_LIBS' in os.environ:
+            # Bypass autodetection, set language to F77 and use env var linker
+            # flags directly
+            self._calc_info_from_envvar()
+            return
+
+        for lapack in lapack_order:
+            if self._calc_info(lapack):
+                return
+
+        if 'lapack' not in lapack_order:
+            # Since the user may request *not* to use any library, we still need
+            # to raise warnings to signal missing packages!
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class _ilp64_opt_info_mixin: + symbol_suffix = None + symbol_prefix = None + + def _check_info(self, info): + macros = dict(info.get('define_macros', [])) + prefix = macros.get('BLAS_SYMBOL_PREFIX', '') + suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') + + if self.symbol_prefix not in (None, prefix): + return False + + if self.symbol_suffix not in (None, suffix): + return False + + return bool(info) + + +class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): + notfounderror = LapackILP64NotFoundError + lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] + order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' + + def _calc_info(self, name): + print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) + info = get_info(name + '_lapack') + if self._check_info(info): + self.set_info(**info) + return True + else: + print('%s_lapack does not exist' % (name)) + return False + + +class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): + # Same as lapack_ilp64_opt_info, but fix symbol names + symbol_prefix = '' + symbol_suffix = '' + + +class lapack64__opt_info(lapack_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class blas_opt_info(system_info): + notfounderror = BlasNotFoundError + # List of all known BLAS libraries, in the default order + + blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', + 'accelerate', 'atlas', 'blas'] + order_env_var_name = 'NPY_BLAS_ORDER' + + def _calc_info_armpl(self): + info = get_info('blas_armpl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_mkl(self): + info = get_info('blas_mkl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_ssl2(self): + info = get_info('blas_ssl2') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blis(self): + info = get_info('blis') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_openblas(self): + info = get_info('openblas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_atlas(self): + info = get_info('atlas_3_10_blas_threads') + if not info: + info = get_info('atlas_3_10_blas') + if not info: + info = get_info('atlas_blas_threads') + if not info: + info = get_info('atlas_blas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blas(self): + # Warn about a non-optimized BLAS library + warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) + info = {} + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + blas = get_info('blas') + if blas: + dict_append(info, **blas) + else: + # Not even BLAS was found! 
+            warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+
+            blas_src = get_info('blas_src')
+            if not blas_src:
+                warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+                return False
+            dict_append(info, libraries=[('fblas_src', blas_src)])
+
+        self.set_info(**info)
+        return True
+
+    def _calc_info_from_envvar(self):
+        info = {}
+        info['language'] = 'f77'
+        info['libraries'] = []
+        info['include_dirs'] = []
+        info['define_macros'] = []
+        info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split()
+        if 'NPY_CBLAS_LIBS' in os.environ:
+            info['define_macros'].append(('HAVE_CBLAS', None))
+            info['extra_link_args'].extend(
+                os.environ['NPY_CBLAS_LIBS'].split())
+        self.set_info(**info)
+        return True
+
+    def _calc_info(self, name):
+        return getattr(self, '_calc_info_{}'.format(name))()
+
+    def calc_info(self):
+        blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)
+        if len(unknown_order) > 0:
+            raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order))
+
+        if 'NPY_BLAS_LIBS' in os.environ:
+            # Bypass autodetection, set language to F77 and use env var linker
+            # flags directly
+            self._calc_info_from_envvar()
+            return
+
+        for blas in blas_order:
+            if self._calc_info(blas):
+                return
+
+        if 'blas' not in blas_order:
+            # Since the user may request *not* to use any library, we still need
+            # to raise warnings to signal missing packages!
+            warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)
+            warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
+
+
+class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):
+    notfounderror = BlasILP64NotFoundError
+    blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate']
+    order_env_var_name = 'NPY_BLAS_ILP64_ORDER'
+
+    def _calc_info(self, name):
+        info = get_info(name)
+        if self._check_info(info):
+            self.set_info(**info)
+            return True
+        return False
+
+
+class blas_ilp64_plain_opt_info(blas_ilp64_opt_info):
+    symbol_prefix = ''
+    symbol_suffix = ''
+
+
+class blas64__opt_info(blas_ilp64_opt_info):
+    symbol_prefix = ''
+    symbol_suffix = '64_'
+
+
+class cblas_info(system_info):
+    section = 'cblas'
+    dir_env_var = 'CBLAS'
+    # No default as it's used only in blas_info
+    _lib_names = []
+    notfounderror = BlasNotFoundError
+
+
+class blas_info(system_info):
+    section = 'blas'
+    dir_env_var = 'BLAS'
+    _lib_names = ['blas']
+    notfounderror = BlasNotFoundError
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+        opt = self.get_option_single('blas_libs', 'libraries')
+        blas_libs = self.get_libs(opt, self._lib_names)
+        info = self.check_libs(lib_dirs, blas_libs, [])
+        if info is None:
+            return
+        else:
+            info['include_dirs'] = self.get_include_dirs()
+        if platform.system() == 'Windows':
+            # The check for windows is needed because get_cblas_libs uses the
+            # same compiler that was used to compile Python and msvc is
+            # often not installed when mingw is being used. This rough
+            # treatment is not desirable, but windows is tricky.
+            info['language'] = 'f77'  # XXX: is it generally true?
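+            # An illustrative site.cfg fragment (hypothetical paths) that
+            # drives this branch, supplying a separate CBLAS library on top
+            # of the Fortran BLAS found above:
+            #
+            #   [blas]
+            #   library_dirs = /opt/blas/lib
+            #   blas_libs = blas
+            #
+            #   [cblas]
+            #   cblas_libs = cblas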
+            # If cblas is given as an option, use those
+            cblas_info_obj = cblas_info()
+            cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
+            cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
+            if cblas_libs:
+                info['libraries'] = cblas_libs + blas_libs
+                info['define_macros'] = [('HAVE_CBLAS', None)]
+        else:
+            lib = self.get_cblas_libs(info)
+            if lib is not None:
+                info['language'] = 'c'
+                info['libraries'] = lib
+                info['define_macros'] = [('HAVE_CBLAS', None)]
+        self.set_info(**info)
+
+    def get_cblas_libs(self, info):
+        """ Check whether we can link with CBLAS interface
+
+        This method will search through several combinations of libraries
+        to check whether CBLAS is present:
+
+        1. Libraries in ``info['libraries']``, as is
+        2. As 1. but also explicitly adding ``'cblas'`` as a library
+        3. As 1. but also explicitly adding ``'blas'`` as a library
+        4. Check only library ``'cblas'``
+        5. Check only library ``'blas'``
+
+        Parameters
+        ----------
+        info : dict
+           system information dictionary for compilation and linking
+
+        Returns
+        -------
+        libraries : list of str or None
+            a list of libraries that enables the use of CBLAS interface.
+            Returns None if not found or a compilation error occurs.
+
+            Since 1.17 returns a list.
+        """
+        # primitive cblas check by looking for the header and trying to link
+        # cblas or blas
+        c = customized_ccompiler()
+        tmpdir = tempfile.mkdtemp()
+        s = textwrap.dedent("""\
+            #include <cblas.h>
+            int main(int argc, const char *argv[])
+            {
+                double a[4] = {1,2,3,4};
+                double b[4] = {5,6,7,8};
+                return cblas_ddot(4, a, 1, b, 1) > 10;
+            }""")
+        src = os.path.join(tmpdir, 'source.c')
+        try:
+            with open(src, 'w') as f:
+                f.write(s)
+
+            try:
+                # check we can compile (find headers)
+                obj = c.compile([src], output_dir=tmpdir,
+                                include_dirs=self.get_include_dirs())
+            except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
+                return None
+
+            # check we can link (find library)
+            # some systems have separate cblas and blas libs.
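+            # Candidate library sets, tried in order (mirroring the
+            # docstring above):
+            #   1. info['libraries'] as configured
+            #   2. ['cblas'] + info['libraries']
+            #   3. ['blas'] + info['libraries']
+            #   4. ['cblas'] alone
+            #   5. ['blas'] alone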
+ for libs in [info['libraries'], ['cblas'] + info['libraries'], + ['blas'] + info['libraries'], ['cblas'], ['blas']]: + try: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=libs, + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + return libs + except distutils.ccompiler.LinkError: + pass + finally: + shutil.rmtree(tmpdir) + return None + + +class openblas_info(blas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = [] + notfounderror = BlasNotFoundError + + @property + def symbol_prefix(self): + try: + return self.cp.get(self.section, 'symbol_prefix') + except NoOptionError: + return '' + + @property + def symbol_suffix(self): + try: + return self.cp.get(self.section, 'symbol_suffix') + except NoOptionError: + return '' + + def _calc_info(self): + c = customized_ccompiler() + + lib_dirs = self.get_lib_dirs() + + # Prefer to use libraries over openblas_libs + opt = self.get_option_single('openblas_libs', 'libraries') + openblas_libs = self.get_libs(opt, self._lib_names) + + info = self.check_libs(lib_dirs, openblas_libs, []) + + if c.compiler_type == "msvc" and info is None: + from numpy.distutils.fcompiler import new_fcompiler + f = new_fcompiler(c_compiler=c) + if f and f.compiler_type == 'gnu95': + # Try gfortran-compatible library files + info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) + # Skip lapack check, we'd need build_ext to do it + skip_symbol_check = True + elif info: + skip_symbol_check = False + info['language'] = 'c' + + if info is None: + return None + + # Add extra info for OpenBLAS + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if not (skip_symbol_check or self.check_symbols(info)): + return None + + info['define_macros'] = [('HAVE_CBLAS', None)] + if self.symbol_prefix: + info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] + if self.symbol_suffix: + info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] + + return info + + def calc_info(self): + info = self._calc_info() + if info is not None: + self.set_info(**info) + + def check_msvc_gfortran_libs(self, library_dirs, libraries): + # First, find the full path to each library directory + library_paths = [] + for library in libraries: + for library_dir in library_dirs: + # MinGW static ext will be .a + fullpath = os.path.join(library_dir, library + '.a') + if os.path.isfile(fullpath): + library_paths.append(fullpath) + break + else: + return None + + # Generate numpy.distutils virtual static library file + basename = self.__class__.__name__ + tmpdir = os.path.join(os.getcwd(), 'build', basename) + if not os.path.isdir(tmpdir): + os.makedirs(tmpdir) + + info = {'library_dirs': [tmpdir], + 'libraries': [basename], + 'language': 'f77'} + + fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') + fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') + with open(fake_lib_file, 'w') as f: + f.write("\n".join(library_paths)) + with open(fake_clib_file, 'w') as f: + pass + + return info + + def check_symbols(self, info): + res = False + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + + prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + calls = "\n".join("%s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + s = textwrap.dedent("""\ + %(prototypes)s + int main(int 
argc, const char *argv[]) + { + %(calls)s + return 0; + }""") % dict(prototypes=prototypes, calls=calls) + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + try: + extra_args = info['extra_link_args'] + except Exception: + extra_args = [] + try: + with open(src, 'w') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + +class openblas_lapack_info(openblas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = ['zungqr_'] + notfounderror = BlasNotFoundError + +class openblas_clapack_info(openblas_lapack_info): + _lib_names = ['openblas', 'lapack'] + +class openblas_ilp64_info(openblas_info): + section = 'openblas_ilp64' + dir_env_var = 'OPENBLAS_ILP64' + _lib_names = ['openblas64'] + _require_symbols = ['dgemm_', 'cblas_dgemm'] + notfounderror = BlasILP64NotFoundError + + def _calc_info(self): + info = super()._calc_info() + if info is not None: + info['define_macros'] += [('HAVE_BLAS_ILP64', None)] + return info + +class openblas_ilp64_lapack_info(openblas_ilp64_info): + _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] + + def _calc_info(self): + info = super()._calc_info() + if info: + info['define_macros'] += [('HAVE_LAPACKE', None)] + return info + +class openblas64__info(openblas_ilp64_info): + # ILP64 Openblas, with default symbol suffix + section = 'openblas64_' + dir_env_var = 'OPENBLAS64_' + _lib_names = ['openblas64_'] + symbol_suffix = '64_' + symbol_prefix = '' + +class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): + pass + +class blis_info(blas_info): + section = 'blis' + dir_env_var = 'BLIS' + _lib_names = ['blis'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blis_libs', 'libraries') + blis_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs2(lib_dirs, blis_libs, []) + if info is None: + return + + # Add include dirs + incl_dirs = self.get_include_dirs() + dict_append(info, + language='c', + define_macros=[('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + + +class flame_info(system_info): + """ Usage of libflame for LAPACK operations + + This requires libflame to be compiled with lapack wrappers: + + ./configure --enable-lapack2flame ... + + Be aware that libflame 5.1.0 has some missing names in the shared library, so + if you have problems, try the static flame library. 
+ """ + section = 'flame' + _lib_names = ['flame'] + notfounderror = FlameNotFoundError + + def check_embedded_lapack(self, info): + """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + s = textwrap.dedent("""\ + void zungqr_(); + int main(int argc, const char *argv[]) + { + zungqr_(); + return 0; + }""") + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + extra_args = info.get('extra_link_args', []) + try: + with open(src, 'w') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + return True + except distutils.ccompiler.LinkError: + return False + finally: + shutil.rmtree(tmpdir) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + flame_libs = self.get_libs('libraries', self._lib_names) + + info = self.check_libs2(lib_dirs, flame_libs, []) + if info is None: + return + + # Add the extra flag args to info + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if self.check_embedded_lapack(info): + # check if the user has supplied all information required + self.set_info(**info) + else: + # Try and get the BLAS lib to see if we can get it to work + blas_info = get_info('blas_opt') + if not blas_info: + # since we already failed once, this ain't going to work either + return + + # Now we need to merge the two dictionaries + for key in blas_info: + if isinstance(blas_info[key], list): + info[key] = info.get(key, []) + blas_info[key] + elif isinstance(blas_info[key], tuple): + info[key] = info.get(key, ()) + blas_info[key] + else: + info[key] = info.get(key, '') + blas_info[key] + + # Now check again + if self.check_embedded_lapack(info): + self.set_info(**info) + + +class accelerate_info(system_info): + section = 'accelerate' + _lib_names = ['accelerate', 'veclib'] + notfounderror = BlasNotFoundError + + def calc_info(self): + # Make possible to enable/disable from config file/env var + libraries = os.environ.get('ACCELERATE') + if libraries: + libraries = [libraries] + else: + libraries = self.get_libs('libraries', self._lib_names) + libraries = [lib.strip().lower() for lib in libraries] + + if (sys.platform == 'darwin' and + not os.getenv('_PYTHON_HOST_PLATFORM', None)): + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if (os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/') and + 'accelerate' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif (os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/') and + 'veclib' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + + if args: + macros = [ + ('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None), + ('ACCELERATE_NEW_LAPACK', None), + ] + if(os.getenv('NPY_USE_BLAS_ILP64', None)): + print('Setting HAVE_BLAS_ILP64') + macros += [ + ('HAVE_BLAS_ILP64', None), + ('ACCELERATE_LAPACK_ILP64', None), + ] + 
+                self.set_info(extra_compile_args=args,
+                              extra_link_args=link_args,
+                              define_macros=macros)
+
+        return
+
+class accelerate_lapack_info(accelerate_info):
+    def _calc_info(self):
+        return super()._calc_info()
+
+class blas_src_info(system_info):
+    # BLAS_SRC is deprecated, please do not use this!
+    # Build or install a BLAS library via your package manager or from
+    # source separately.
+    section = 'blas_src'
+    dir_env_var = 'BLAS_SRC'
+    notfounderror = BlasSrcNotFoundError
+
+    def get_paths(self, section, key):
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d, ['blas']))
+        return [d for d in dirs if os.path.isdir(d)]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d, 'daxpy.f')):
+                src_dir = d
+                break
+        if not src_dir:
+            #XXX: Get sources from netlib. Maybe ask first.
+            return
+        blas1 = '''
+        caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
+        dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
+        srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
+        dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
+        snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
+        scabs1
+        '''
+        blas2 = '''
+        cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
+        chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
+        dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
+        sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
+        stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
+        zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
+        ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
+        '''
+        blas3 = '''
+        cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
+        dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
+        ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
+        '''
+        sources = [os.path.join(src_dir, f + '.f') \
+                   for f in (blas1 + blas2 + blas3).split()]
+        #XXX: should we check here actual existence of source files?
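+        # Yes -- the reference-BLAS file list differs between netlib
+        # releases, so only the sources that actually exist are kept: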
+ sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + + +class x11_info(system_info): + section = 'x11' + notfounderror = X11NotFoundError + _lib_names = ['X11'] + + def __init__(self): + system_info.__init__(self, + default_lib_dirs=default_x11_lib_dirs, + default_include_dirs=default_x11_include_dirs) + + def calc_info(self): + if sys.platform in ['win32']: + return + lib_dirs = self.get_lib_dirs() + include_dirs = self.get_include_dirs() + opt = self.get_option_single('x11_libs', 'libraries') + x11_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, x11_libs, []) + if info is None: + return + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, 'X11/X.h'): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + self.set_info(**info) + + +class _numpy_info(system_info): + section = 'Numeric' + modulename = 'Numeric' + notfounderror = NumericNotFoundError + + def __init__(self): + include_dirs = [] + try: + module = __import__(self.modulename) + prefix = [] + for name in module.__file__.split(os.sep): + if name == 'lib': + break + prefix.append(name) + + # Ask numpy for its own include path before attempting + # anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + + include_dirs.append(sysconfig.get_path('include')) + except ImportError: + pass + py_incl_dir = sysconfig.get_path('include') + include_dirs.append(py_incl_dir) + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in include_dirs: + include_dirs.append(py_pincl_dir) + for d in default_include_dirs: + d = os.path.join(d, os.path.basename(py_incl_dir)) + if d not in include_dirs: + include_dirs.append(d) + system_info.__init__(self, + default_lib_dirs=[], + default_include_dirs=include_dirs) + + def calc_info(self): + try: + module = __import__(self.modulename) + except ImportError: + return + info = {} + macros = [] + for v in ['__version__', 'version']: + vrs = getattr(module, v, None) + if vrs is None: + continue + macros = [(self.modulename.upper() + '_VERSION', + _c_string_literal(vrs)), + (self.modulename.upper(), None)] + break + dict_append(info, define_macros=macros) + include_dirs = self.get_include_dirs() + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, + os.path.join(self.modulename, + 'arrayobject.h')): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + if info: + self.set_info(**info) + return + + +class numarray_info(_numpy_info): + section = 'numarray' + modulename = 'numarray' + + +class Numeric_info(_numpy_info): + section = 'Numeric' + modulename = 'Numeric' + + +class numpy_info(_numpy_info): + section = 'numpy' + modulename = 'numpy' + + +class numerix_info(system_info): + section = 'numerix' + + def calc_info(self): + which = None, None + if os.getenv("NUMERIX"): + which = os.getenv("NUMERIX"), "environment var" + # If all the above fail, default to numpy. 
+ if which[0] is None: + which = "numpy", "defaulted" + try: + import numpy # noqa: F401 + which = "numpy", "defaulted" + except ImportError as e: + msg1 = str(e) + try: + import Numeric # noqa: F401 + which = "numeric", "defaulted" + except ImportError as e: + msg2 = str(e) + try: + import numarray # noqa: F401 + which = "numarray", "defaulted" + except ImportError as e: + msg3 = str(e) + log.info(msg1) + log.info(msg2) + log.info(msg3) + which = which[0].strip().lower(), which[1] + if which[0] not in ["numeric", "numarray", "numpy"]: + raise ValueError("numerix selector must be either 'Numeric' " + "or 'numarray' or 'numpy' but the value obtained" + " from the %s was '%s'." % (which[1], which[0])) + os.environ['NUMERIX'] = which[0] + self.set_info(**get_info(which[0])) + + +class f2py_info(system_info): + def calc_info(self): + try: + import numpy.f2py as f2py + except ImportError: + return + f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') + self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], + include_dirs=[f2py_dir]) + return + + +class boost_python_info(system_info): + section = 'boost_python' + dir_env_var = 'BOOST' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['boost*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', + 'module.cpp')): + src_dir = d + break + if not src_dir: + return + py_incl_dirs = [sysconfig.get_path('include')] + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in py_incl_dirs: + py_incl_dirs.append(py_pincl_dir) + srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') + bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) + bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) + info = {'libraries': [('boost_python_src', + {'include_dirs': [src_dir] + py_incl_dirs, + 'sources':bpl_srcs} + )], + 'include_dirs': [src_dir], + } + if info: + self.set_info(**info) + return + + +class agg2_info(system_info): + section = 'agg2' + dir_env_var = 'AGG2' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['agg2*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): + src_dir = d + break + if not src_dir: + return + if sys.platform == 'win32': + agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', + 'win32', 'agg_win32_bmp.cpp')) + else: + agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) + agg2_srcs += [os.path.join(src_dir, 'src', 'platform', + 'X11', + 'agg_platform_support.cpp')] + + info = {'libraries': + [('agg2_src', + {'sources': agg2_srcs, + 'include_dirs': [os.path.join(src_dir, 'include')], + } + )], + 'include_dirs': [os.path.join(src_dir, 'include')], + } + if info: + self.set_info(**info) + return + + +class _pkg_config_info(system_info): + section = None + config_env_var = 'PKG_CONFIG' + default_config_exe = 'pkg-config' + append_config_exe = '' + version_macro_name = None + release_macro_name = None + version_flag = '--modversion' + cflags_flag = '--cflags' + + def get_config_exe(self): + if self.config_env_var in os.environ: + return 
os.environ[self.config_env_var] + return self.default_config_exe + + def get_config_output(self, config_exe, option): + cmd = config_exe + ' ' + self.append_config_exe + ' ' + option + try: + o = subprocess.check_output(cmd) + except (OSError, subprocess.CalledProcessError): + pass + else: + o = filepath_from_subprocess_output(o) + return o + + def calc_info(self): + config_exe = find_executable(self.get_config_exe()) + if not config_exe: + log.warn('File not found: %s. Cannot determine %s info.' \ + % (config_exe, self.section)) + return + info = {} + macros = [] + libraries = [] + library_dirs = [] + include_dirs = [] + extra_link_args = [] + extra_compile_args = [] + version = self.get_config_output(config_exe, self.version_flag) + if version: + macros.append((self.__class__.__name__.split('.')[-1].upper(), + _c_string_literal(version))) + if self.version_macro_name: + macros.append((self.version_macro_name + '_%s' + % (version.replace('.', '_')), None)) + if self.release_macro_name: + release = self.get_config_output(config_exe, '--release') + if release: + macros.append((self.release_macro_name + '_%s' + % (release.replace('.', '_')), None)) + opts = self.get_config_output(config_exe, '--libs') + if opts: + for opt in opts.split(): + if opt[:2] == '-l': + libraries.append(opt[2:]) + elif opt[:2] == '-L': + library_dirs.append(opt[2:]) + else: + extra_link_args.append(opt) + opts = self.get_config_output(config_exe, self.cflags_flag) + if opts: + for opt in opts.split(): + if opt[:2] == '-I': + include_dirs.append(opt[2:]) + elif opt[:2] == '-D': + if '=' in opt: + n, v = opt[2:].split('=') + macros.append((n, v)) + else: + macros.append((opt[2:], None)) + else: + extra_compile_args.append(opt) + if macros: + dict_append(info, define_macros=macros) + if libraries: + dict_append(info, libraries=libraries) + if library_dirs: + dict_append(info, library_dirs=library_dirs) + if include_dirs: + dict_append(info, include_dirs=include_dirs) + if extra_link_args: + dict_append(info, extra_link_args=extra_link_args) + if extra_compile_args: + dict_append(info, extra_compile_args=extra_compile_args) + if info: + self.set_info(**info) + return + + +class wx_info(_pkg_config_info): + section = 'wx' + config_env_var = 'WX_CONFIG' + default_config_exe = 'wx-config' + append_config_exe = '' + version_macro_name = 'WX_VERSION' + release_macro_name = 'WX_RELEASE' + version_flag = '--version' + cflags_flag = '--cxxflags' + + +class gdk_pixbuf_xlib_2_info(_pkg_config_info): + section = 'gdk_pixbuf_xlib_2' + append_config_exe = 'gdk-pixbuf-xlib-2.0' + version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' + + +class gdk_pixbuf_2_info(_pkg_config_info): + section = 'gdk_pixbuf_2' + append_config_exe = 'gdk-pixbuf-2.0' + version_macro_name = 'GDK_PIXBUF_VERSION' + + +class gdk_x11_2_info(_pkg_config_info): + section = 'gdk_x11_2' + append_config_exe = 'gdk-x11-2.0' + version_macro_name = 'GDK_X11_VERSION' + + +class gdk_2_info(_pkg_config_info): + section = 'gdk_2' + append_config_exe = 'gdk-2.0' + version_macro_name = 'GDK_VERSION' + + +class gdk_info(_pkg_config_info): + section = 'gdk' + append_config_exe = 'gdk' + version_macro_name = 'GDK_VERSION' + + +class gtkp_x11_2_info(_pkg_config_info): + section = 'gtkp_x11_2' + append_config_exe = 'gtk+-x11-2.0' + version_macro_name = 'GTK_X11_VERSION' + + +class gtkp_2_info(_pkg_config_info): + section = 'gtkp_2' + append_config_exe = 'gtk+-2.0' + version_macro_name = 'GTK_VERSION' + + +class xft_info(_pkg_config_info): + section = 'xft' + append_config_exe = 'xft' + 
version_macro_name = 'XFT_VERSION' + + +class freetype2_info(_pkg_config_info): + section = 'freetype2' + append_config_exe = 'freetype2' + version_macro_name = 'FREETYPE2_VERSION' + + +class amd_info(system_info): + section = 'amd' + dir_env_var = 'AMD' + _lib_names = ['amd'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('amd_libs', 'libraries') + amd_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, amd_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, 'amd.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_AMD_H', None)], + swig_opts=['-I' + inc_dir]) + + self.set_info(**info) + return + + +class umfpack_info(system_info): + section = 'umfpack' + dir_env_var = 'UMFPACK' + notfounderror = UmfpackNotFoundError + _lib_names = ['umfpack'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('umfpack_libs', 'libraries') + umfpack_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, umfpack_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_UMFPACK_H', None)], + swig_opts=['-I' + inc_dir]) + + dict_append(info, **get_info('amd')) + + self.set_info(**info) + return + + +def combine_paths(*args, **kws): + """ Return a list of existing paths composed by all combinations of + items from arguments. 
+ """ + r = [] + for a in args: + if not a: + continue + if is_string(a): + a = [a] + r.append(a) + args = r + if not args: + return [] + if len(args) == 1: + result = reduce(lambda a, b: a + b, map(glob, args[0]), []) + elif len(args) == 2: + result = [] + for a0 in args[0]: + for a1 in args[1]: + result.extend(glob(os.path.join(a0, a1))) + else: + result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) + log.debug('(paths: %s)', ','.join(result)) + return result + +language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} +inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} + + +def dict_append(d, **kws): + languages = [] + for k, v in kws.items(): + if k == 'language': + languages.append(v) + continue + if k in d: + if k in ['library_dirs', 'include_dirs', + 'extra_compile_args', 'extra_link_args', + 'runtime_library_dirs', 'define_macros']: + [d[k].append(vv) for vv in v if vv not in d[k]] + else: + d[k].extend(v) + else: + d[k] = v + if languages: + l = inv_language_map[max([language_map.get(l, 0) for l in languages])] + d['language'] = l + return + + +def parseCmdLine(argv=(None,)): + import optparse + parser = optparse.OptionParser("usage: %prog [-v] [info objs]") + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + default=False, + help='be verbose and print more messages') + + opts, args = parser.parse_args(args=argv[1:]) + return opts, args + + +def show_all(argv=None): + import inspect + if argv is None: + argv = sys.argv + opts, args = parseCmdLine(argv) + if opts.verbose: + log.set_threshold(log.DEBUG) + else: + log.set_threshold(log.INFO) + show_only = [] + for n in args: + if n[-5:] != '_info': + n = n + '_info' + show_only.append(n) + show_all = not show_only + _gdict_ = globals().copy() + for name, c in _gdict_.items(): + if not inspect.isclass(c): + continue + if not issubclass(c, system_info) or c is system_info: + continue + if not show_all: + if name not in show_only: + continue + del show_only[show_only.index(name)] + conf = c() + conf.verbosity = 2 + # we don't need the result, but we want + # the side effect of printing diagnostics + conf.get_info() + if show_only: + log.info('Info classes not defined: %s', ','.join(show_only)) + +if __name__ == "__main__": + show_all() diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__init__.py b/phivenv/Lib/site-packages/numpy/distutils/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..139553ceab33cc7db582c6a8604fd51999e24297 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a5804839f247c951eb73b43b35f7995bd95d8a7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc 
b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b7a6a0f673fca3ae07a8cf3efb7a572f7c8eab Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f90830175eadc57e554a5436e70806629064629 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc5c9b46078984bf2b3ab8f9d3b5d093df722f55 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e5c0a5b845cce0dd2554d5c6db574d10496ac10 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9371ca5f524b1b641375d5ae445bed13a24012ad Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..024edae9ebdbb7a717d1c92569c2aa58bae749f0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29e97ed1880521e733412ee28c917424636c7428 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..913e6a09a772f8e9acf45e6d6b5ac430de5de781 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_log.cpython-39.pyc 
b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_log.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52da4da8b02f4936048bcbe6bd8414f83ee33883 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_log.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9363cfed7f0b07aa272767be49bf21332182dcdc Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e4294190fb3d230b3e55c01fb71e1be9b4ba1d6 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0346754d87faad7ca71790ded88bf032ecd5c200 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cefc76aa07d4f8b44b7e75619dab0f1a2e70901 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..775e6a1c61dc3ff8155d4b763c626f635f0042b3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/utilities.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/utilities.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c14ac5f642058e7016d1de8cc14b3c2457aa8e9 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/distutils/tests/__pycache__/utilities.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_build_ext.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_build_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..3b139872cdc743582545a366265963399f943f37 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_build_ext.py @@ -0,0 +1,74 @@ +'''Tests for numpy.distutils.build_ext.''' + +import os +import subprocess +import sys +from textwrap import indent, dedent +import pytest +from numpy.testing import IS_WASM + +@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") 
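+# The test below covers the gh-18295 regression: two "fake" static libraries, +# each defining one Fortran routine, must both end up on the extension's +# final link line.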
+@pytest.mark.slow +def test_multi_fortran_libs_link(tmp_path): + ''' + Ensures multiple "fake" static libraries are correctly linked. + see gh-18295 + ''' + + # We need to make sure we actually have an f77 compiler. + # This is nontrivial, so we'll borrow the utilities + # from f2py tests: + from numpy.distutils.tests.utilities import has_f77_compiler + if not has_f77_compiler(): + pytest.skip('No F77 compiler found') + + # make some dummy sources + with open(tmp_path / '_dummy1.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_one() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy2.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_two() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy.c', 'w') as fid: + # doesn't need to load - just needs to exist + fid.write('int PyInit_dummyext;') + + # make a setup file + with open(tmp_path / 'setup.py', 'w') as fid: + srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') + fid.write(dedent(f'''\ + def configuration(parent_package="", top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration("", parent_package, top_path) + config.add_library("dummy1", sources=["_dummy1.f"]) + config.add_library("dummy2", sources=["_dummy2.f"]) + config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) + return config + + + if __name__ == "__main__": + import sys + sys.path.insert(0, r"{srctree}") + from numpy.distutils.core import setup + setup(**configuration(top_path="").todict())''')) + + # build the test extension and "install" into a temporary directory + build_dir = tmp_path + subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', + '--prefix', str(tmp_path / 'installdir'), + '--record', str(tmp_path / 'tmp_install_log.txt'), + ], + cwd=str(build_dir), + ) + # get the path to the built .so + so = None + with open(tmp_path / 'tmp_install_log.txt') as fid: + for line in fid: + if 'dummyext' in line: + so = line.strip() + break + assert so is not None diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..96af719251a9b0c085dd8c5c9e86956ec21725cb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt.py @@ -0,0 +1,808 @@ +import re, textwrap, os +from os import sys, path +from distutils.errors import DistutilsError + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + import unittest, contextlib, tempfile, shutil + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt + + # from numpy/testing/_private/utils.py + @contextlib.contextmanager + def tempdir(*args, **kwargs): + tmpdir = tempfile.mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + def assert_(expr, msg=''): + if not expr: + raise AssertionError(msg) +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + from numpy.testing import assert_, tempdir + +# architectures and compilers to test +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang", "fcc"), + s390x = ("gcc", "clang"), + noarch = ("gcc",) +) + +class 
FakeCCompilerOpt(CCompilerOpt): + fake_info = "" + def __init__(self, trap_files="", trap_flags="", *args, **kwargs): + self.fake_trap_files = trap_files + self.fake_trap_flags = trap_flags + CCompilerOpt.__init__(self, None, **kwargs) + + def __repr__(self): + return textwrap.dedent("""\ + <<<< + march : {} + compiler : {} + ---------------- + {} + >>>> + """).format(self.cc_march, self.cc_name, self.report()) + + def dist_compile(self, sources, flags, **kwargs): + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + if self.fake_trap_files: + for src in sources: + if re.match(self.fake_trap_files, src): + self.dist_error("source is trapped by a fake interface") + if self.fake_trap_flags: + for f in flags: + if re.match(self.fake_trap_flags, f): + self.dist_error("flag is trapped by a fake interface") + # fake objects + return zip(sources, [' '.join(flags)] * len(sources)) + + def dist_info(self): + return FakeCCompilerOpt.fake_info + + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _Test_CCompilerOpt: + arch = None # x86_64 + cc = None # gcc + + def setup_class(self): + FakeCCompilerOpt.conf_nocache = True + self._opt = None + + def nopt(self, *args, **kwargs): + FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") + return FakeCCompilerOpt(*args, **kwargs) + + def opt(self): + if not self._opt: + self._opt = self.nopt() + return self._opt + + def march(self): + return self.opt().cc_march + + def cc_name(self): + return self.opt().cc_name + + def get_targets(self, targets, groups, **kwargs): + FakeCCompilerOpt.conf_target_groups = groups + opt = self.nopt( + cpu_baseline=kwargs.get("baseline", "min"), + cpu_dispatch=kwargs.get("dispatch", "max"), + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + with tempdir() as tmpdir: + file = os.path.join(tmpdir, "test_targets.c") + with open(file, 'w') as f: + f.write(targets) + gtargets = [] + gflags = {} + fake_objects = opt.try_dispatch([file]) + for source, flags in fake_objects: + gtar = path.basename(source).split('.')[1:-1] + glen = len(gtar) + if glen == 0: + gtar = "baseline" + elif glen == 1: + gtar = gtar[0].upper() + else: + # converting multi-target into parentheses str format to be equivalent + # to the configuration statements syntax. 
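+ # e.g. a generated source named 'test_targets.avx2.fma3.c' (illustrative + # name) maps to the label '(AVX2 FMA3)', while a single suffix such as + # 'test_targets.avx2.c' maps to plain 'AVX2'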
+ gtar = ('('+' '.join(gtar)+')').upper() + gtargets.append(gtar) + gflags[gtar] = flags + + has_baseline, targets = opt.sources_status[file] + targets = targets + ["baseline"] if has_baseline else targets + # convert tuples that represent multi-target into parentheses str format + targets = [ + '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar + for tar in targets + ] + if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): + raise AssertionError( + "'sources_status' returns different targets than the compiled targets\n" + "%s != %s" % (targets, gtargets) + ) + # return targets from 'sources_status' since the order matters + return targets, gflags + + def arg_regex(self, **kwargs): + map2origin = dict( + x64 = "x86", + ppc64le = "ppc64", + aarch64 = "armhf", + clang = "gcc", + ) + march = self.march(); cc_name = self.cc_name() + map_march = map2origin.get(march, march) + map_cc = map2origin.get(cc_name, cc_name) + for key in ( + march, cc_name, map_march, map_cc, + march + '_' + cc_name, + map_march + '_' + cc_name, + march + '_' + map_cc, + map_march + '_' + map_cc, + ) : + regex = kwargs.pop(key, None) + if regex is not None: + break + if regex: + if isinstance(regex, dict): + for k, v in regex.items(): + if v[-1:] not in ')}$?\\.+*': + regex[k] = v + '$' + else: + assert(isinstance(regex, str)) + if regex[-1:] not in ')}$?\\.+*': + regex += '$' + return regex + + def expect(self, dispatch, baseline="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + features = ' '.join(opt.cpu_dispatch_names()) + if not match: + if len(features) != 0: + raise AssertionError( + 'expected empty features, not "%s"' % features + ) + return + if not re.match(match, features, re.IGNORECASE): + raise AssertionError( + 'dispatch features "%s" not match "%s"' % (features, match) + ) + + def expect_baseline(self, baseline, dispatch="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + features = ' '.join(opt.cpu_baseline_names()) + if not match: + if len(features) != 0: + raise AssertionError( + 'expected empty features, not "%s"' % features + ) + return + if not re.match(match, features, re.IGNORECASE): + raise AssertionError( + 'baseline features "%s" not match "%s"' % (features, match) + ) + + def expect_flags(self, baseline, dispatch="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + flags = ' '.join(opt.cpu_baseline_flags()) + if not match: + if len(flags) != 0: + raise AssertionError( + 'expected empty flags, not "%s"' % flags + ) + return + if not re.match(match, flags): + raise AssertionError( + 'flags "%s" not match "%s"' % (flags, match) + ) + + def expect_targets(self, targets, groups={}, **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) + targets = ' '.join(targets) + if not match: + if len(targets) != 0: + raise AssertionError( + 'expected empty targets, not "%s"' % targets + ) + return + if not re.match(match, targets, 
re.IGNORECASE): + raise AssertionError( + 'targets "%s" not match "%s"' % (targets, match) + ) + + def expect_target_flags(self, targets, groups={}, **kwargs): + match_dict = self.arg_regex(**kwargs) + if match_dict is None: + return + assert(isinstance(match_dict, dict)) + _, tar_flags = self.get_targets(targets=targets, groups=groups) + + for match_tar, match_flags in match_dict.items(): + if match_tar not in tar_flags: + raise AssertionError( + 'expected to find target "%s"' % match_tar + ) + flags = tar_flags[match_tar] + if not match_flags: + if len(flags) != 0: + raise AssertionError( + 'expected to find empty flags in target "%s"' % match_tar + ) + if not re.match(match_flags, flags): + raise AssertionError( + '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags) + ) + + def test_interface(self): + wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" + wrong_cc = "clang" if self.cc != "clang" else "icc" + opt = self.opt() + assert_(getattr(opt, "cc_on_" + self.arch)) + assert_(not getattr(opt, "cc_on_" + wrong_arch)) + assert_(getattr(opt, "cc_is_" + self.cc)) + assert_(not getattr(opt, "cc_is_" + wrong_cc)) + + def test_args_empty(self): + for baseline, dispatch in ( + ("", "none"), + (None, ""), + ("none +none", "none - none"), + ("none -max", "min - max"), + ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), + ("max -vsx - avx + avx512f neon -MAX ", + "min -min + max -max -vsx + avx2 -avx2 +NONE") + ) : + opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) + assert(len(opt.cpu_baseline_names()) == 0) + assert(len(opt.cpu_dispatch_names()) == 0) + + def test_args_validation(self): + if self.march() == "unknown": + return + # sanity-check argument validation + for baseline, dispatch in ( + ("unkown_feature - max +min", "unknown max min"), # unknown feature names + ("#avx2", "$vsx") # groups and policies aren't acceptable + ) : + try: + self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) + raise AssertionError("expected an exception for invalid arguments") + except DistutilsError: + pass + + def test_skip(self): + # only take what the platform supports and skip the others + # without causing exceptions + self.expect( + "sse vsx neon", + x86="sse", ppc64="vsx", armhf="neon", unknown="" + ) + self.expect( + "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", + x86 = "sse41 avx avx2", + ppc64 = "vsx2 vsx3", + armhf = "neon_vfpv4 asimd", + unknown = "" + ) + # any features in cpu_dispatch must be ignored if they're part of the baseline + self.expect( + "sse neon vsx", baseline="sse neon vsx", + x86="", ppc64="", armhf="" + ) + self.expect( + "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", + x86="", ppc64="", armhf="" + ) + + def test_implies(self): + # the baseline combines implied features, so we rely + # on it instead of testing 'feature_implies()' directly + self.expect_baseline( + "fma3 avx2 asimd vsx3", + # .* between two spaces can validate features in between + x86 = "sse .* sse41 .* fma3.*avx2", + ppc64 = "vsx vsx2 vsx3", + armhf = "neon neon_fp16 neon_vfpv4 asimd" + ) + """ + special cases + """ + # in icc and msvc, FMA3 and AVX2 can't be separated + # both need to imply each other, same for avx512f & cd + for f0, f1 in ( + ("fma3", "avx2"), + ("avx512f", "avx512cd"), + ): + diff = ".* sse42 .* %s .*%s$" % (f0, f1) + self.expect_baseline(f0, + x86_gcc=".* sse42 .* %s$" % f0, + x86_icc=diff, x86_iccw=diff + ) + self.expect_baseline(f1, + x86_gcc=".* avx .* %s$" % f1, + x86_icc=diff, x86_iccw=diff + ) + # in msvc, the following features can't be separated either + for f in 
(("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): + for ff in f: + self.expect_baseline(ff, + x86_msvc=".*%s" % ' '.join(f) + ) + + # in ppc64le VSX and VSX2 can't be separated + self.expect_baseline("vsx", ppc64le="vsx vsx2") + # in aarch64 following features can't be separated + for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): + self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") + + def test_args_options(self): + # max & native + for o in ("max", "native"): + if o == "native" and self.cc_name() == "msvc": + continue + self.expect(o, + trap_files=".*cpu_(sse|vsx|neon|vx).c", + x86="", ppc64="", armhf="", s390x="" + ) + self.expect(o, + trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", + x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", + aarch64="", ppc64le="", s390x="vx" + ) + self.expect(o, + trap_files=".*cpu_(popcnt|vsx3).c", + x86="sse .* sse41", ppc64="vsx vsx2", + armhf="neon neon_fp16 .* asimd .*", + s390x="vx vxe vxe2" + ) + self.expect(o, + x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", + # in icc, xop and fam4 aren't supported + x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", + x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", + # in msvc, avx512_knl avx512_knm aren't supported + x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", + armhf=".* asimd asimdhp asimddp .*", + ppc64="vsx vsx2 vsx3 vsx4.*", + s390x="vx vxe vxe2.*" + ) + # min + self.expect("min", + x86="sse sse2", x64="sse sse2 sse3", + armhf="", aarch64="neon neon_fp16 .* asimd", + ppc64="", ppc64le="vsx vsx2", s390x="" + ) + self.expect( + "min", trap_files=".*cpu_(sse2|vsx2).c", + x86="", ppc64le="" + ) + # an exception must triggered if native flag isn't supported + # when option "native" is activated through the args + try: + self.expect("native", + trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", + x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", + ) + if self.march() != "unknown": + raise AssertionError( + "excepted an exception for %s" % self.march() + ) + except DistutilsError: + if self.march() == "unknown": + raise AssertionError("excepted no exceptions") + + def test_flags(self): + self.expect_flags( + "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", + x86_gcc="-msse -msse2", x86_icc="-msse -msse2", + x86_iccw="/arch:SSE2", + x86_msvc="/arch:SSE2" if self.march() == "x86" else "", + ppc64_gcc= "-mcpu=power8", + ppc64_clang="-mcpu=power8", + armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", + aarch64="", + s390x="-mzvector -march=arch12" + ) + # testing normalize -march + self.expect_flags( + "asimd", + aarch64="", + armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" + ) + self.expect_flags( + "asimdhp", + aarch64_gcc=r"-march=armv8.2-a\+fp16", + armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" + ) + self.expect_flags( + "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" + ) + self.expect_flags( + # asimdfhm implies asimdhp + "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" + ) + self.expect_flags( + "asimddp asimdhp asimdfhm", + aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" + ) + self.expect_flags( + "vx vxe vxe2", + s390x=r"-mzvector -march=arch13" + ) + + def test_targets_exceptions(self): + for targets in ( + "bla bla", "/*@targets", + "/*@targets */", + "/*@targets unknown */", + "/*@targets $unknown_policy avx2 */", + "/*@targets #unknown_group avx2 */", + "/*@targets $ */", + "/*@targets # vsx */", + "/*@targets #$ vsx */", + 
"/*@targets vsx avx2 ) */", + "/*@targets vsx avx2 (avx2 */", + "/*@targets vsx avx2 () */", + "/*@targets vsx avx2 ($autovec) */", # no features + "/*@targets vsx avx2 (xxx) */", + "/*@targets vsx avx2 (baseline) */", + ) : + try: + self.expect_targets( + targets, + x86="", armhf="", ppc64="", s390x="" + ) + if self.march() != "unknown": + raise AssertionError( + "excepted an exception for %s" % self.march() + ) + except DistutilsError: + if self.march() == "unknown": + raise AssertionError("excepted no exceptions") + + def test_targets_syntax(self): + for targets in ( + "/*@targets $keep_baseline sse vsx neon vx*/", + "/*@targets,$keep_baseline,sse,vsx,neon vx*/", + "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", + """ + /* + ** @targets + ** $keep_baseline, sse vsx,neon, vx + */ + """, + """ + /* + ************@targets**************** + ** $keep_baseline, sse vsx, neon, vx + ************************************ + */ + """, + """ + /* + /////////////@targets///////////////// + //$keep_baseline//sse//vsx//neon//vx + ///////////////////////////////////// + */ + """, + """ + /* + @targets + $keep_baseline + SSE VSX NEON VX*/ + """ + ) : + self.expect_targets(targets, + x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" + ) + + def test_targets(self): + # test skipping baseline features + self.expect_targets( + """ + /*@targets + sse sse2 sse41 avx avx2 avx512f + vsx vsx2 vsx3 vsx4 + neon neon_fp16 asimdhp asimddp + vx vxe vxe2 + */ + """, + baseline="avx vsx2 asimd vx vxe", + x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", + s390x="vxe2" + ) + # test skipping non-dispatch features + self.expect_targets( + """ + /*@targets + sse41 avx avx2 avx512f + vsx2 vsx3 vsx4 + asimd asimdhp asimddp + vx vxe vxe2 + */ + """, + baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", + x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" + ) + # test skipping features that not supported + self.expect_targets( + """ + /*@targets + sse2 sse41 avx2 avx512f + vsx2 vsx3 vsx4 + neon asimdhp asimddp + vx vxe vxe2 + */ + """, + baseline="", + trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", + x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", + s390x="vxe vx" + ) + # test skipping features that implies each other + self.expect_targets( + """ + /*@targets + sse sse2 avx fma3 avx2 avx512f avx512cd + vsx vsx2 vsx3 + neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp + asimddp asimdfhm + */ + """, + baseline="", + x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", + x86_msvc="avx512cd avx2 avx sse2", + x86_icc="avx512cd avx2 avx sse2", + x86_iccw="avx512cd avx2 avx sse2", + ppc64="vsx3 vsx2 vsx", + ppc64le="vsx3 vsx2", + armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", + aarch64="asimdfhm asimddp asimdhp asimd" + ) + + def test_targets_policies(self): + # 'keep_baseline', generate objects for baseline features + self.expect_targets( + """ + /*@targets + $keep_baseline + sse2 sse42 avx2 avx512f + vsx2 vsx3 + neon neon_vfpv4 asimd asimddp + vx vxe vxe2 + */ + """, + baseline="sse41 avx2 vsx2 asimd vsx3 vxe", + x86="avx512f avx2 sse42 sse2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimddp asimd", + s390x="vxe2 vxe vx" + ) + # 'keep_sort', leave the sort as-is + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort + avx512f sse42 avx2 sse2 + vsx2 vsx3 + asimd neon neon_vfpv4 asimddp + vxe vxe2 + */ + """, + x86="avx512f sse42 avx2 sse2", + ppc64="vsx2 vsx3", + 
armhf="asimd neon neon_vfpv4 asimddp", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimd asimddp", + s390x="vxe vxe2" + ) + # 'autovec', skipping features that can't be + # vectorized by the compiler + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort $autovec + avx512f avx2 sse42 sse41 sse2 + vsx3 vsx2 + asimddp asimd neon_vfpv4 neon + */ + """, + x86_gcc="avx512f avx2 sse42 sse41 sse2", + x86_icc="avx512f avx2 sse42 sse41 sse2", + x86_iccw="avx512f avx2 sse42 sse41 sse2", + x86_msvc="avx512f avx2 sse2" + if self.march() == 'x86' else "avx512f avx2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimddp asimd" + ) + for policy in ("$maxopt", "$autovec"): + # 'maxopt' and autovec set the max acceptable optimization flags + self.expect_target_flags( + "/*@targets baseline %s */" % policy, + gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, + iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, + unknown={"baseline":".*"} + ) + + # 'werror', force compilers to treat warnings as errors + self.expect_target_flags( + "/*@targets baseline $werror */", + gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, + iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, + unknown={"baseline":".*"} + ) + + def test_targets_groups(self): + self.expect_targets( + """ + /*@targets $keep_baseline baseline #test_group */ + """, + groups=dict( + test_group=(""" + $keep_baseline + asimddp sse2 vsx2 avx2 vsx3 + avx512f asimdhp + """) + ), + x86="avx512f avx2 sse2 baseline", + ppc64="vsx3 vsx2 baseline", + armhf="asimddp asimdhp baseline" + ) + # test skip duplicating and sorting + self.expect_targets( + """ + /*@targets + * sse42 avx avx512f + * #test_group_1 + * vsx2 + * #test_group_2 + * asimddp asimdfhm + */ + """, + groups=dict( + test_group_1=(""" + VSX2 vsx3 asimd avx2 SSE41 + """), + test_group_2=(""" + vsx2 vsx3 asImd aVx2 sse41 + """) + ), + x86="avx512f avx2 avx sse42 sse41", + ppc64="vsx3 vsx2", + # vsx2 part of the default baseline of ppc64le, option ("min") + ppc64le="vsx3", + armhf="asimdfhm asimddp asimd", + # asimd part of the default baseline of aarch64, option ("min") + aarch64="asimdfhm asimddp" + ) + + def test_targets_multi(self): + self.expect_targets( + """ + /*@targets + (avx512_clx avx512_cnl) (asimdhp asimddp) + */ + """, + x86=r"\(avx512_clx avx512_cnl\)", + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and auto-sort + self.expect_targets( + """ + /*@targets + f16c (sse41 avx sse42) (sse3 avx2 avx512f) + vsx2 (vsx vsx3 vsx2) + (neon neon_vfpv4 asimd asimdhp asimddp) + */ + """, + x86="avx512f f16c avx", + ppc64="vsx3 vsx2", + ppc64le="vsx3", # vsx2 part of baseline + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and keep sort + self.expect_targets( + """ + /*@targets $keep_sort + (sse41 avx sse42) (sse3 avx2 avx512f) + (vsx vsx3 vsx2) + (asimddp neon neon_vfpv4 asimd asimdhp) + (vx vxe vxe2) + */ + """, + x86="avx avx512f", + ppc64="vsx3", + armhf=r"\(asimdhp asimddp\)", + s390x="vxe2" + ) + # test compiler variety and avoiding duplicating + self.expect_targets( + """ + /*@targets $keep_sort + fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 + */ + """, + x86_gcc=r"fma3 avx2 \(fma3 avx2\)", + x86_icc="avx2", x86_iccw="avx2", + x86_msvc="avx2" + ) + +def new_test(arch, cc): + if is_standalone: return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): + arch = '{arch}' 
+ cc = '{cc}' + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self.setup_class() + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) + return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): + arch = '{arch}' + cc = '{cc}' + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) +""" +if 1 and is_standalone: + FakeCCompilerOpt.fake_info = "x86_icc" + cco = FakeCCompilerOpt(None, cpu_baseline="avx2") + print(' '.join(cco.cpu_baseline_names())) + print(cco.cpu_baseline_flags()) + unittest.main() + sys.exit() +""" +for arch, compilers in arch_compilers.items(): + for cc in compilers: + exec(new_test(arch, cc)) + +if is_standalone: + unittest.main() diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py new file mode 100644 index 0000000000000000000000000000000000000000..def19428a1799e9205bb14231c0ffceef2bda4ba --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py @@ -0,0 +1,176 @@ +import unittest +from os import sys, path + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang"), + narch = ("gcc",) +) + +class FakeCCompilerOpt(CCompilerOpt): + fake_info = ("arch", "compiler", "extra_args") + def __init__(self, *args, **kwargs): + CCompilerOpt.__init__(self, None, **kwargs) + def dist_compile(self, sources, flags, **kwargs): + return sources + def dist_info(self): + return FakeCCompilerOpt.fake_info + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _TestConfFeatures(FakeCCompilerOpt): + """A hook to check the sanity of configured features + before it is called by the abstract class '_Feature' + """ + + def conf_features_partial(self): + conf_all = self.conf_features + for feature_name, feature in conf_all.items(): + self.test_feature( + "attribute conf_features", + conf_all, feature_name, feature + ) + + conf_partial = FakeCCompilerOpt.conf_features_partial(self) + for feature_name, feature in conf_partial.items(): + self.test_feature( + "conf_features_partial()", + conf_partial, feature_name, feature + ) + return conf_partial + + def test_feature(self, log, search_in, feature_name, feature_dict): + error_msg = ( + "while validating '{}' within feature '{}', " + "march '{}' and compiler '{}'\n>> " + ).format(log, feature_name, self.cc_march, self.cc_name) + + if not feature_name.isupper(): + raise AssertionError(error_msg + "feature name must be in uppercase") + + for option, val in feature_dict.items(): + self.test_option_types(error_msg, option, val) + self.test_duplicates(error_msg, option, val) + + self.test_implies(error_msg, search_in, feature_name, feature_dict) + self.test_group(error_msg, search_in, feature_name, feature_dict) + self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) + + def test_option_types(self, error_msg, option, val): + for tp, available in ( + ((str, list), ( + "implies", "headers", "flags", "group", "detect", "extra_checks" + )), + ((str,), 
("disable",)), + ((int,), ("interest",)), + ((bool,), ("implies_detect",)), + ((bool, type(None)), ("autovec",)), + ) : + found_it = option in available + if not found_it: + continue + if not isinstance(val, tp): + error_tp = [t.__name__ for t in (*tp,)] + error_tp = ' or '.join(error_tp) + raise AssertionError(error_msg + + "expected '%s' type for option '%s' not '%s'" % ( + error_tp, option, type(val).__name__ + )) + break + + if not found_it: + raise AssertionError(error_msg + "invalid option name '%s'" % option) + + def test_duplicates(self, error_msg, option, val): + if option not in ( + "implies", "headers", "flags", "group", "detect", "extra_checks" + ) : return + + if isinstance(val, str): + val = val.split() + + if len(val) != len(set(val)): + raise AssertionError(error_msg + "duplicated values in option '%s'" % option) + + def test_implies(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + implies = feature_dict.get("implies", "") + if not implies: + return + if isinstance(implies, str): + implies = implies.split() + + if feature_name in implies: + raise AssertionError(error_msg + "feature implies itself") + + for impl in implies: + impl_dict = search_in.get(impl) + if impl_dict is not None: + if "disable" in impl_dict: + raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) + continue + raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) + + def test_group(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + group = feature_dict.get("group", "") + if not group: + return + if isinstance(group, str): + group = group.split() + + for f in group: + impl_dict = search_in.get(f) + if not impl_dict or "disable" in impl_dict: + continue + raise AssertionError(error_msg + + "in option 'group', '%s' already exists as a feature name" % f + ) + + def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + extra_checks = feature_dict.get("extra_checks", "") + if not extra_checks: + return + if isinstance(extra_checks, str): + extra_checks = extra_checks.split() + + for f in extra_checks: + impl_dict = search_in.get(f) + if not impl_dict or "disable" in impl_dict: + continue + raise AssertionError(error_msg + + "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f + ) + +class TestConfFeatures(unittest.TestCase): + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self._setup() + + def _setup(self): + FakeCCompilerOpt.conf_nocache = True + + def test_features(self): + for arch, compilers in arch_compilers.items(): + for cc in compilers: + FakeCCompilerOpt.fake_info = (arch, cc, "") + _TestConfFeatures() + +if is_standalone: + unittest.main() diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_exec_command.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_exec_command.py new file mode 100644 index 0000000000000000000000000000000000000000..9be3540582ffe496efb247791f3c9047651802e0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_exec_command.py @@ -0,0 +1,217 @@ +import os +import pytest +import sys +from tempfile import TemporaryFile + +from numpy.distutils import exec_command +from numpy.distutils.exec_command import get_pythonexe +from numpy.testing import tempdir, assert_, assert_warns, IS_WASM + + +# In python 3 stdout, stderr are text 
(unicode compliant) devices, so to +# emulate them import StringIO from the io module. +from io import StringIO + +class redirect_stdout: + """Context manager to redirect stdout for exec_command test.""" + def __init__(self, stdout=None): + self._stdout = stdout or sys.stdout + + def __enter__(self): + self.old_stdout = sys.stdout + sys.stdout = self._stdout + + def __exit__(self, exc_type, exc_value, traceback): + self._stdout.flush() + sys.stdout = self.old_stdout + # note: closing sys.stdout won't close it. + self._stdout.close() + +class redirect_stderr: + """Context manager to redirect stderr for exec_command test.""" + def __init__(self, stderr=None): + self._stderr = stderr or sys.stderr + + def __enter__(self): + self.old_stderr = sys.stderr + sys.stderr = self._stderr + + def __exit__(self, exc_type, exc_value, traceback): + self._stderr.flush() + sys.stderr = self.old_stderr + # note: closing sys.stderr won't close it. + self._stderr.close() + +class emulate_nonposix: + """Context manager to emulate os.name != 'posix' """ + def __init__(self, osname='non-posix'): + self._new_name = osname + + def __enter__(self): + self._old_name = os.name + os.name = self._new_name + + def __exit__(self, exc_type, exc_value, traceback): + os.name = self._old_name + + +def test_exec_command_stdout(): + # Regression test for gh-2999 and gh-2915. + # There are several packages (nose, scipy.weave.inline, Sage inline + # Fortran) that replace stdout, in which case it doesn't have a fileno + # method. This is tested here, with a do-nothing command that fails if the + # presence of fileno() is assumed in exec_command. + + # The code has a special case for posix systems, so if we are on posix test + # both that the special case works and that the generic code works. 
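+ # For illustration (standard CPython io behavior): io.StringIO() has no + # OS-level descriptor, so io.StringIO().fileno() raises + # io.UnsupportedOperation, which is why exec_command must not assume + # that a replaced sys.stdout provides fileno().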
+ + # Test posix version: + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + +def test_exec_command_stderr(): + # Test posix version: + with redirect_stdout(TemporaryFile(mode='w+')): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(TemporaryFile()): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +class TestExecCommand: + def setup_method(self): + self.pyexe = get_pythonexe() + + def check_nt(self, **kws): + s, o = exec_command.exec_command('cmd /C echo path=%path%') + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) + assert_(s == 0) + assert_(o == 'win32') + + def check_posix(self, **kws): + s, o = exec_command.exec_command("echo Hello", **kws) + assert_(s == 0) + assert_(o == 'Hello') + + s, o = exec_command.exec_command('echo $AAA', **kws) + assert_(s == 0) + assert_(o == '') + + s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) + assert_(s == 0) + assert_(o == 'Tere') + + s, o = exec_command.exec_command('echo "$AAA"', **kws) + assert_(s == 0) + assert_(o == '') + + if 'BBB' not in os.environ: + os.environ['BBB'] = 'Hi' + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) + assert_(s == 0) + assert_(o == 'Hey') + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + del os.environ['BBB'] + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == '') + + + s, o = exec_command.exec_command('this_is_not_a_command', **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command('echo path=$PATH', **kws) + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'posix') + + def check_basic(self, **kws): + s, o = exec_command.exec_command( + '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(\'0\');' + 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == '012') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) + assert_(s == 15) + assert_(o == '') + + s, o = exec_command.exec_command( + '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'Heipa') + + def check_execute_in(self, **kws): + with tempdir() as tmpdir: + fn = "file" + tmpfile = os.path.join(tmpdir, fn) + with open(tmpfile, 'w') as f: + f.write('Hello') + + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % + (self.pyexe, fn), **kws) + assert_(s != 0) + 
assert_(o != '') + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' + 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) + assert_(s == 0) + assert_(o == 'Hello') + + def test_basic(self): + with redirect_stdout(StringIO()): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + if os.name == "posix": + self.check_posix(use_tee=0) + self.check_posix(use_tee=1) + elif os.name == "nt": + self.check_nt(use_tee=0) + self.check_nt(use_tee=1) + self.check_execute_in(use_tee=0) + self.check_execute_in(use_tee=1) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..1d24aa62df5aae423623fad908b829790d2de491 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler.py @@ -0,0 +1,43 @@ +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +customizable_flags = [ + ('f77', 'F77FLAGS'), + ('f90', 'F90FLAGS'), + ('free', 'FREEFLAGS'), + ('arch', 'FARCH'), + ('debug', 'FDEBUG'), + ('flags', 'FFLAGS'), + ('linker_so', 'LDFLAGS'), +] + + +def test_fcompiler_flags(monkeypatch): + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') + flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + assert_(new_flags == [new_flag]) + + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + if prev_flags is None: + assert_(new_flags == [new_flag]) + else: + assert_(new_flags == prev_flags + [new_flag]) + diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py new file mode 100644 index 0000000000000000000000000000000000000000..52ceb973283f8e601e70efd77745c4f28046cb05 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py @@ -0,0 +1,55 @@ +from numpy.testing import assert_ + +import numpy.distutils.fcompiler + +g77_version_strings = [ + ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), + ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), + ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), + ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), + ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' + ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), +] + +gfortran_version_strings = [ + ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', + '4.0.3'), + ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), + ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), + ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), + ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), + ('4.8.0', '4.8.0'), + ('4.0.3-7', '4.0.3'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", + '4.9.1'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" + "gfortran: warning: yet another warning\n4.9.1", + '4.9.1'), + 
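# the two entries above require version_match to recover '4.9.1' even when + # one or more warning lines precede the version string +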
('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') +] + +class TestG77Versions: + def test_g77_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, version in g77_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_g77(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, _ in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) + +class TestGFortranVersions: + def test_gfortran_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, version in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_gfortran(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, _ in g77_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef537096aa470851e3cc91e879f7d20f240bec1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py @@ -0,0 +1,30 @@ +import numpy.distutils.fcompiler +from numpy.testing import assert_ + + +intel_32bit_version_strings = [ + ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" + "running on Intel(R) 32, Version 11.1", '11.1'), +] + +intel_64bit_version_strings = [ + ("Intel(R) Fortran IA-64 Compiler Professional for applications" + "running on IA-64, Version 11.0", '11.0'), + ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" + "running on Intel(R) 64, Version 11.1", '11.1') +] + +class TestIntelFCompilerVersions: + def test_32bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') + for vs, version in intel_32bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) + + +class TestIntelEM64TFCompilerVersions: + def test_64bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') + for vs, version in intel_64bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py new file mode 100644 index 0000000000000000000000000000000000000000..36a67b3dfd5db522f6a7b3c404c3403243652bfb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py @@ -0,0 +1,22 @@ +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' + '6.2(Chiyoda) Build 6200', '6.2'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.1(Tozai) Build 6136', '6.1'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.0(Hibiya) Build 1021', '6.0'), + ('nagfor', 'NAG Fortran Compiler Release ' + '5.3.2(971)', '5.3.2'), + ('nag', 'NAGWare Fortran 95 compiler Release 5.1' + '(347,355-367,375,380-383,389,394,399,401-402,407,' + '431,435,437,446,459-460,463,472,494,496,503,508,' + '511,517,529,555,557,565)', '5.1')] + +class TestNagFCompilerVersions: + def test_version_match(self): + for comp, vs, version in nag_version_strings: + fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) + v = fc.version_match(vs) + assert_(v == version) diff --git 
a/phivenv/Lib/site-packages/numpy/distutils/tests/test_from_template.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_from_template.py new file mode 100644 index 0000000000000000000000000000000000000000..4c725f0eeb4ed76a2b2b468742d56b7f93b659eb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_from_template.py @@ -0,0 +1,44 @@ + +from numpy.distutils.from_template import process_str +from numpy.testing import assert_equal + + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_log.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_log.py new file mode 100644 index 0000000000000000000000000000000000000000..7597e05f28713d51547c65d8235c4d731670665f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_log.py @@ -0,0 +1,34 @@ +import io +import re +from contextlib import redirect_stdout + +import pytest + +from numpy.distutils import log + + +def setup_module(): + f = io.StringIO() # changing verbosity also logs here, capture that + with redirect_stdout(f): + log.set_verbosity(2, force=True) # i.e. DEBUG + + +def teardown_module(): + log.set_verbosity(0, force=True) # the default + + +r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + + +@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) +def test_log_prefix(func_name): + func = getattr(log, func_name) + msg = f"{func_name} message" + f = io.StringIO() + with redirect_stdout(f): + func(msg) + out = f.getvalue() + assert out # sanity check + clean_out = r_ansi.sub("", out) + line = next(line for line in clean_out.splitlines()) + assert line == f"{func_name.upper()}: {msg}" diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..2a8145fc65eb71d8a78ac18de45d6d9877968c37 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py @@ -0,0 +1,42 @@ +import shutil +import subprocess +import sys +import pytest + +from numpy.distutils import mingw32ccompiler + + +@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') +def test_build_import(): + '''Test the mingw32ccompiler.build_import_library, which builds a + `python.a` from the MSVC `python.lib` + ''' + + # make sure `nm.exe` exists and supports the current python version. 
This + # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit + try: + out = subprocess.check_output(['nm.exe', '--help']) + except FileNotFoundError: + pytest.skip("'nm.exe' not on path, is mingw installed?") + supported = out[out.find(b'supported targets:'):] + if sys.maxsize < 2**32: + if b'pe-i386' not in supported: + raise ValueError("'nm.exe' found but it does not support 32-bit " + "dlls when using 32-bit python. Supported " + "formats: '%s'" % supported) + elif b'pe-x86-64' not in supported: + raise ValueError("'nm.exe' found but it does not support 64-bit " + "dlls when using 64-bit python. Supported " + "formats: '%s'" % supported) + # Hide the import library to force a build + has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() + if has_import_lib: + shutil.move(fullpath, fullpath + '.bak') + + try: + # Whew, now we can actually test the function + mingw32ccompiler.build_import_library() + + finally: + if has_import_lib: + shutil.move(fullpath + '.bak', fullpath) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_misc_util.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_misc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..93cc906ca5da1f106dd11549628fd423b298f0d7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_misc_util.py @@ -0,0 +1,82 @@ +from os.path import join, sep, dirname + +from numpy.distutils.misc_util import ( + appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info + ) +from numpy.testing import ( + assert_, assert_equal + ) + +ajoin = lambda *paths: join(*((sep,)+paths)) + +class TestAppendpath: + + def test_1(self): + assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) + assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) + assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) + assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) + + def test_2(self): + assert_equal(appendpath('prefix/sub', 'name'), + join('prefix', 'sub', 'name')) + assert_equal(appendpath('prefix/sub', 'sup/name'), + join('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub', '/prefix/name'), + ajoin('prefix', 'sub', 'name')) + + def test_3(self): + assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), + ajoin('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) + +class TestMinrelpath: + + def test_1(self): + n = lambda path: path.replace('/', sep) + assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) + assert_equal(minrelpath('..'), '..') + assert_equal(minrelpath(n('aa/..')), '') + assert_equal(minrelpath(n('aa/../bb')), 'bb') + assert_equal(minrelpath(n('aa/bb/..')), 'aa') + assert_equal(minrelpath(n('aa/bb/../..')), '') + assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) + assert_equal(minrelpath(n('.././..')), n('../..')) + assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) + +class TestGpaths: + + def test_gpaths(self): + local_path = minrelpath(join(dirname(__file__), '..')) + ls = gpaths('command/*.py', local_path) + assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) + f = gpaths('system_info.py', local_path) + assert_(join(local_path, 'system_info.py') == f[0], repr(f)) + +class TestSharedExtension: + + def 
test_get_shared_lib_extension(self): + import sys + ext = get_shared_lib_extension(is_python_ext=False) + if sys.platform.startswith('linux'): + assert_equal(ext, '.so') + elif sys.platform.startswith('gnukfreebsd'): + assert_equal(ext, '.so') + elif sys.platform.startswith('darwin'): + assert_equal(ext, '.dylib') + elif sys.platform.startswith('win'): + assert_equal(ext, '.dll') + # just check for no crash + assert_(get_shared_lib_extension(is_python_ext=True)) + + +def test_installed_npymath_ini(): + # Regression test for gh-7707. If npymath.ini wasn't installed, then this + # will give an error. + info = get_info('npymath') + + assert isinstance(info, dict) + assert "define_macros" in info diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a1e079e1822d6f9e297b60746a293270d622d7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,84 @@ +import os + +from numpy.distutils.npy_pkg_config import read_config, parse_flags +from numpy.testing import temppath, assert_ + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo: + def test_simple(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_d['cflags']) + assert_(out.libs() == simple_d['libflags']) + assert_(out.name == simple_d['name']) + assert_(out.version == simple_d['version']) + + def test_simple_variable(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple_variable) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_variable_d['cflags']) + assert_(out.libs() == simple_variable_d['libflags']) + assert_(out.name == simple_variable_d['name']) + assert_(out.version == simple_variable_d['version']) + out.vars['prefix'] = '/Users/david' + assert_(out.cflags() == '-I/Users/david/include') + +class TestParseFlags: + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + assert_(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_shell_utils.py 
b/phivenv/Lib/site-packages/numpy/distutils/tests/test_shell_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d39f7c8f5c29b7759f83d296e3073cdb3efd9728 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_shell_utils.py @@ -0,0 +1,79 @@ +import pytest +import subprocess +import json +import sys + +from numpy.distutils import _shell_utils +from numpy.testing import IS_WASM + +argv_cases = [ + [r'exe'], + [r'path/exe'], + [r'path\exe'], + [r'\\server\path\exe'], + [r'path to/exe'], + [r'path to\exe'], + + [r'exe', '--flag'], + [r'path/exe', '--flag'], + [r'path\exe', '--flag'], + [r'path to/exe', '--flag'], + [r'path to\exe', '--flag'], + + # flags containing literal quotes in their name + [r'path to/exe', '--flag-"quoted"'], + [r'path to\exe', '--flag-"quoted"'], + [r'path to/exe', '"--flag-quoted"'], + [r'path to\exe', '"--flag-quoted"'], +] + + +@pytest.fixture(params=[ + _shell_utils.WindowsParser, + _shell_utils.PosixParser +]) +def Parser(request): + return request.param + + +@pytest.fixture +def runner(Parser): + if Parser != _shell_utils.NativeParser: + pytest.skip('Unable to run with non-native parser') + + if Parser == _shell_utils.WindowsParser: + return lambda cmd: subprocess.check_output(cmd) + elif Parser == _shell_utils.PosixParser: + # posix has no non-shell string parsing + return lambda cmd: subprocess.check_output(cmd, shell=True) + else: + raise NotImplementedError + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.parametrize('argv', argv_cases) +def test_join_matches_subprocess(Parser, runner, argv): + """ + Test that join produces strings understood by subprocess + """ + # invoke python to return its arguments as json + cmd = [ + sys.executable, '-c', + 'import json, sys; print(json.dumps(sys.argv[1:]))' + ] + joined = Parser.join(cmd + argv) + json_out = runner(joined).decode() + assert json.loads(json_out) == argv + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.parametrize('argv', argv_cases) +def test_roundtrip(Parser, argv): + """ + Test that split is the inverse operation of join + """ + try: + joined = Parser.join(argv) + assert argv == Parser.split(joined) + except NotImplementedError: + pytest.skip("Not implemented") diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/test_system_info.py b/phivenv/Lib/site-packages/numpy/distutils/tests/test_system_info.py new file mode 100644 index 0000000000000000000000000000000000000000..4e9515b069bacda7d1277a6f558cfb08d36e2d4c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/test_system_info.py @@ -0,0 +1,334 @@ +import os +import shutil +import pytest +from tempfile import mkstemp, mkdtemp +from subprocess import Popen, PIPE +import importlib.metadata +from distutils.errors import DistutilsError + +from numpy.testing import assert_, assert_equal, assert_raises +from numpy.distutils import ccompiler, customized_ccompiler +from numpy.distutils.system_info import system_info, ConfigParser, mkl_info +from numpy.distutils.system_info import AliasedOptionError +from numpy.distutils.system_info import default_lib_dirs, default_include_dirs +from numpy.distutils import _shell_utils + + +try: + # compare the major version numerically; comparing raw version strings + # would misbehave for setuptools >= 100 + if int(importlib.metadata.version('setuptools').split('.')[0]) >= 60: + # pkg-resources gives deprecation warnings, and there may be more + # issues. We only support setuptools <60 + pytest.skip("setuptools is too new", allow_module_level=True) +except importlib.metadata.PackageNotFoundError: + # we don't require `setuptools`; if it is not found, continue + pass + + +def get_class(name, notfound_action=1): + """ + notfound_action: + 0 - do nothing + 1 - display warning message + 2 - raise error + """ + cl = {'temp1': Temp1Info, + 'temp2': Temp2Info, + 'duplicate_options': DuplicateOptionInfo, + }.get(name.lower(), _system_info) + return cl() + +simple_site = """ +[ALL] +library_dirs = {dir1:s}{pathsep:s}{dir2:s} +libraries = {lib1:s},{lib2:s} +extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os +runtime_library_dirs = {dir1:s} + +[temp1] +library_dirs = {dir1:s} +libraries = {lib1:s} +runtime_library_dirs = {dir1:s} + +[temp2] +library_dirs = {dir2:s} +libraries = {lib2:s} +extra_link_args = -Wl,-rpath={lib2_escaped:s} +rpath = {dir2:s} + +[duplicate_options] +mylib_libs = {lib1:s} +libraries = {lib2:s} +""" +site_cfg = simple_site + +fakelib_c_text = """ +/* This file is generated from numpy/distutils/testing/test_system_info.py */ +#include <stdio.h> +void foo(void) { + printf("Hello foo"); +} +void bar(void) { + printf("Hello bar"); +} +""" + +def have_compiler(): + """ Return True if there appears to be an executable compiler + """ + compiler = customized_ccompiler() + try: + cmd = compiler.compiler # Unix compilers + except AttributeError: + try: + if not compiler.initialized: + compiler.initialize() # MSVC is different + except (DistutilsError, ValueError): + return False + cmd = [compiler.cc] + try: + p = Popen(cmd, stdout=PIPE, stderr=PIPE) + p.stdout.close() + p.stderr.close() + p.wait() + except OSError: + return False + return True + + +HAVE_COMPILER = have_compiler() + + +class _system_info(system_info): + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + verbosity=1, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': '', + 'include_dirs': '', + 'runtime_library_dirs': '', + 'rpath': '', + 'src_dirs': '', + 'search_static_first': "0", + 'extra_compile_args': '', + 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + # We have to parse the config files afterwards + # to have a consistent temporary filepath + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Override _check_libs to return with all dirs """ + info = {'libraries': libs, 'library_dirs': lib_dirs} + return info + + +class Temp1Info(_system_info): + """For testing purposes""" + section = 'temp1' + + +class Temp2Info(_system_info): + """For testing purposes""" + section = 'temp2' + +class DuplicateOptionInfo(_system_info): + """For testing purposes""" + section = 'duplicate_options' + + +class TestSystemInfoReading: + + def setup_method(self): + """ Create the libraries """ + # Create 2 sources and 2 libraries + self._dir1 = mkdtemp() + self._src1 = os.path.join(self._dir1, 'foo.c') + self._lib1 = os.path.join(self._dir1, 'libfoo.so') + self._dir2 = mkdtemp() + self._src2 = os.path.join(self._dir2, 'bar.c') + self._lib2 = os.path.join(self._dir2, 'libbar.so') + # Update local site.cfg + global simple_site, site_cfg + site_cfg = simple_site.format(**{ + 'dir1': self._dir1, + 'lib1': self._lib1, + 'dir2': self._dir2, + 'lib2': self._lib2, + 'pathsep': os.pathsep, + 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2]) + }) + # Write site.cfg + fd, self._sitecfg = mkstemp() + os.close(fd) + with open(self._sitecfg, 'w') as fd: + fd.write(site_cfg) + 
# Write the sources + with open(self._src1, 'w') as fd: + fd.write(fakelib_c_text) + with open(self._src2, 'w') as fd: + fd.write(fakelib_c_text) + # We create all class-instances + + def site_and_parse(c, site_cfg): + c.files = [site_cfg] + c.parse_config_files() + return c + self.c_default = site_and_parse(get_class('default'), self._sitecfg) + self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) + self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) + self.c_dup_options = site_and_parse(get_class('duplicate_options'), + self._sitecfg) + + def teardown_method(self): + # Do each removal separately + try: + shutil.rmtree(self._dir1) + except Exception: + pass + try: + shutil.rmtree(self._dir2) + except Exception: + pass + try: + os.remove(self._sitecfg) + except Exception: + pass + + def test_all(self): + # Read in all information in the ALL block + tsi = self.c_default + assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) + + def test_temp1(self): + # Read in all information in the temp1 block + tsi = self.c_temp1 + assert_equal(tsi.get_lib_dirs(), [self._dir1]) + assert_equal(tsi.get_libraries(), [self._lib1]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + + def test_temp2(self): + # Read in all information in the temp2 block + tsi = self.c_temp2 + assert_equal(tsi.get_lib_dirs(), [self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib2]) + # Now from rpath and not runtime_library_dirs + assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) + + def test_duplicate_options(self): + # Ensure that duplicates are raising an AliasedOptionError + tsi = self.c_dup_options + assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") + assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) + assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + def test_compile1(self): + # Compile source and link the first source + c = customized_ccompiler() + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir1) + c.compile([os.path.basename(self._src1)], output_dir=self._dir1) + # Ensure that the object exists + assert_(os.path.isfile(self._src1.replace('.c', '.o')) or + os.path.isfile(self._src1.replace('.c', '.obj'))) + finally: + os.chdir(previousDir) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), + reason="Fails with MSVC compiler ") + def test_compile2(self): + # Compile source and link the second source + tsi = self.c_temp2 + c = customized_ccompiler() + extra_link_args = tsi.calc_extra_info()['extra_link_args'] + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir2) + c.compile([os.path.basename(self._src2)], output_dir=self._dir2, + extra_postargs=extra_link_args) + # Ensure that the object exists + assert_(os.path.isfile(self._src2.replace('.c', '.o'))) + finally: + os.chdir(previousDir) + + HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) + + 
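+    # (editor's note: HAS_MKL is evaluated once, at class-body execution time; when numpy itself links against MKL, "mkl_rt" appears in mkl_info()'s libraries and the [DEFAULT]-override test below is expected to fail, hence the xfail marker.)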
@pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if " + "numpy is built with MKL support")) + def test_overrides(self): + previousDir = os.getcwd() + cfg = os.path.join(self._dir1, 'site.cfg') + shutil.copy(self._sitecfg, cfg) + try: + os.chdir(self._dir1) + # Check that the '[ALL]' section does not override + # missing values from other sections + info = mkl_info() + lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) + assert info.get_lib_dirs() != lib_dirs + + # But if we copy the values to a '[mkl]' section the value + # is correct + with open(cfg) as fid: + mkl = fid.read().replace('[ALL]', '[mkl]', 1) + with open(cfg, 'w') as fid: + fid.write(mkl) + info = mkl_info() + assert info.get_lib_dirs() == lib_dirs + + # Also, the values will be taken from a section named '[DEFAULT]' + with open(cfg) as fid: + dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) + with open(cfg, 'w') as fid: + fid.write(dflt) + info = mkl_info() + assert info.get_lib_dirs() == lib_dirs + finally: + os.chdir(previousDir) + + +def test_distutils_parse_env_order(monkeypatch): + from numpy.distutils.system_info import _parse_env_order + env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' + + base_order = list('abcdef') + + monkeypatch.setenv(env, 'b,i,e,f') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 3 + assert order == list('bef') + assert len(unknown) == 1 + + # For when LAPACK/BLAS optimization is disabled + monkeypatch.setenv(env, '') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 0 + assert len(unknown) == 0 + + for prefix in '^!': + monkeypatch.setenv(env, f'{prefix}b,i,e') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 4 + assert order == list('acdf') + assert len(unknown) == 1 + + with pytest.raises(ValueError): + monkeypatch.setenv(env, 'b,^e,i') + _parse_env_order(base_order, env) + + with pytest.raises(ValueError): + monkeypatch.setenv(env, '!b,^e,i') + _parse_env_order(base_order, env) diff --git a/phivenv/Lib/site-packages/numpy/distutils/tests/utilities.py b/phivenv/Lib/site-packages/numpy/distutils/tests/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..4cad87c5add9de50530dd2ab6b5bc237853775ea --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/tests/utilities.py @@ -0,0 +1,90 @@ +# Kanged out of numpy.f2py.tests.util for test_build_ext +from numpy.testing import IS_WASM +import textwrap +import shutil +import tempfile +import os +import re +import subprocess +import sys + +# +# Check if compilers are available at all... +# + +_compiler_status = None + + +def _get_compiler_status(): + global _compiler_status + if _compiler_status is not None: + return _compiler_status + + _compiler_status = (False, False, False) + if IS_WASM: + # Can't run compiler from inside WASM. + return _compiler_status + + # XXX: this is really ugly. But I don't know how to invoke Distutils + # in a safer way... 
+ code = textwrap.dedent( + f"""\ + import os + import sys + sys.path = {repr(sys.path)} + + def configuration(parent_name='',top_path=None): + global config + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + return config + + from numpy.distutils.core import setup + setup(configuration=configuration) + + config_cmd = config.get_config_cmd() + have_c = config_cmd.try_compile('void foo() {{}}') + print('COMPILERS:%%d,%%d,%%d' %% (have_c, + config.have_f77c(), + config.have_f90c())) + sys.exit(99) + """ + ) + code = code % dict(syspath=repr(sys.path)) + + tmpdir = tempfile.mkdtemp() + try: + script = os.path.join(tmpdir, "setup.py") + + with open(script, "w") as f: + f.write(code) + + cmd = [sys.executable, "setup.py", "config"] + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir + ) + out, err = p.communicate() + finally: + shutil.rmtree(tmpdir) + + m = re.search(rb"COMPILERS:(\d+),(\d+),(\d+)", out) + if m: + _compiler_status = ( + bool(int(m.group(1))), + bool(int(m.group(2))), + bool(int(m.group(3))), + ) + # Finished + return _compiler_status + + +def has_c_compiler(): + return _get_compiler_status()[0] + + +def has_f77_compiler(): + return _get_compiler_status()[1] + + +def has_f90_compiler(): + return _get_compiler_status()[2] diff --git a/phivenv/Lib/site-packages/numpy/distutils/unixccompiler.py b/phivenv/Lib/site-packages/numpy/distutils/unixccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..725b6d61801dfcb14c1d60d66ea43596b54856b5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/distutils/unixccompiler.py @@ -0,0 +1,141 @@ +""" +unixccompiler - can handle very long argument lists for ar. + +""" +import os +import sys +import subprocess +import shlex + +from distutils.errors import CompileError, DistutilsExecError, LibError +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.ccompiler import replace_method +from numpy.distutils.misc_util import _commandline_dep_string +from numpy.distutils import log + +# Note that UnixCCompiler._compile appeared in Python 2.3 +def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile a single source files with a Unix-style compiler.""" + # HP ad-hoc fix, see ticket 1383 + ccomp = self.compiler_so + if ccomp[0] == 'aCC': + # remove flags that will trigger ANSI-C mode for aCC + if '-Ae' in ccomp: + ccomp.remove('-Ae') + if '-Aa' in ccomp: + ccomp.remove('-Aa') + # add flags for (almost) sane C++ handling + ccomp += ['-AA'] + self.compiler_so = ccomp + # ensure OPT environment variable is read + if 'OPT' in os.environ: + # XXX who uses this? 
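+            # (editor's note: when OPT is set in the environment, the block below swaps it in for the OPT flags that were recorded in Python's Makefile (via sysconfig), on both the compile and the link command lines.)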
+ from sysconfig import get_config_vars + opt = shlex.join(shlex.split(os.environ['OPT'])) + gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) + ccomp_s = shlex.join(self.compiler_so) + if opt not in ccomp_s: + ccomp_s = ccomp_s.replace(gcv_opt, opt) + self.compiler_so = shlex.split(ccomp_s) + llink_s = shlex.join(self.linker_so) + if opt not in llink_s: + self.linker_so = self.linker_so + shlex.split(opt) + + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) + + # gcc style automatic dependencies, outputs a makefile (-MF) that lists + # all headers needed by a c file as a side effect of compilation (-MMD) + if getattr(self, '_auto_depends', False): + deps = ['-MMD', '-MF', obj + '.d'] + else: + deps = [] + + try: + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + + extra_postargs, display = display) + except DistutilsExecError as e: + msg = str(e) + raise CompileError(msg) from None + + # add commandline flags to dependency file + if deps: + # After running the compiler, the file created will be in EBCDIC + # but will not be tagged as such. This tags it so the file does not + # have multiple different encodings being written to it + if sys.platform == 'zos': + subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) + with open(obj + '.d', 'a') as f: + f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) + +replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) + + +def UnixCCompiler_create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): + """ + Build a static library in a separate sub-process. + + Parameters + ---------- + objects : list or tuple of str + List of paths to object files used to build the static library. + output_libname : str + The library name as an absolute or relative (if `output_dir` is used) + path. + output_dir : str, optional + The path to the output directory. Default is None, in which case + the ``output_dir`` attribute of the UnixCCompiler instance is used. + debug : bool, optional + This parameter is not used. + target_lang : str, optional + This parameter is not used. + + Returns + ------- + None + + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) + + if self._need_link(objects, output_filename): + try: + # previous .a may be screwed up; best to remove it first + # and recreate. + # Also, ar on OS X doesn't handle updating universal archives + os.unlink(output_filename) + except OSError: + pass + self.mkpath(os.path.dirname(output_filename)) + tmp_objects = objects + self.objects + while tmp_objects: + objects = tmp_objects[:50] + tmp_objects = tmp_objects[50:] + display = '%s: adding %d object files to %s' % ( + os.path.basename(self.archiver[0]), + len(objects), output_filename) + self.spawn(self.archiver + [output_filename] + objects, + display = display) + + # Not many Unices require ranlib anymore -- SunOS 4.x is, I + # think, the only major Unix that does. Maybe we need some + # platform intelligence here to skip ranlib if it's not + # needed -- or maybe Python's configure script took care of + # it for us, hence the check for leading colon. + if self.ranlib: + display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), + output_filename) + try: + self.spawn(self.ranlib + [output_filename], + display = display) + except DistutilsExecError as e: + msg = str(e) + raise LibError(msg) from None + else: + log.debug("skipping %s (up-to-date)", output_filename) + return + +replace_method(UnixCCompiler, 'create_static_lib', + UnixCCompiler_create_static_lib) diff --git a/phivenv/Lib/site-packages/numpy/doc/__pycache__/ufuncs.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/doc/__pycache__/ufuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..218b6a580e1d08d1a5b50e563f7dc87832d3dcc7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/doc/__pycache__/ufuncs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/doc/ufuncs.py b/phivenv/Lib/site-packages/numpy/doc/ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..fe86551baa99358fed1b1758a21b04d78807731b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/doc/ufuncs.py @@ -0,0 +1,137 @@ +""" +=================== +Universal Functions +=================== + +Ufuncs are, generally speaking, mathematical functions or operations that are +applied element-by-element to the contents of an array. That is, the result +in each output array element only depends on the value in the corresponding +input array (or arrays) and on no other array elements. NumPy comes with a +large suite of ufuncs, and scipy extends that suite substantially. The simplest +example is the addition operator: :: + + >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) + array([1, 3, 2, 6]) + +The ufunc module lists all the available ufuncs in numpy. Documentation on +the specific ufuncs may be found there. This documentation is +intended to address the more general aspects of ufuncs common to most of +them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) +have equivalent functions defined (e.g. add() for +). + +Type coercion +============= + +What happens when a binary operator (e.g., +,-,\*,/, etc.) deals with arrays of +two different types? What is the type of the result? Typically, the result is +the higher of the two types. For example: :: + + float32 + float64 -> float64 + int8 + int32 -> int32 + int16 + float32 -> float32 + float32 + complex64 -> complex64 + +There are some less obvious cases, generally involving mixes of types +(e.g. uints, ints and floats), where a type of equal bit size cannot +preserve all the information of the other type. Some examples are int32 +vs float32 or uint32 vs int32. +Generally, the result is the higher type of larger size than both +(if available). So: :: + + int32 + float32 -> float64 + uint32 + int32 -> int64 + +Finally, the type coercion behavior when expressions involve Python +scalars is different than that seen for arrays. Since Python has a +limited number of types, combining a Python int with a dtype=np.int8 +array does not coerce to the higher type but instead, the type of the +array prevails. So the rule for Python scalars combined with arrays is +that the result will be the array type equivalent of the Python scalar +if the Python scalar is of a higher 'kind' than the array (e.g., float +vs. int); otherwise the resultant type will be that of the array. +For example: :: + + Python int + int8 -> int8 + Python float + int8 -> float64 + +ufunc methods +============= + +Binary ufuncs support four methods. + +**.reduce(arr)** applies the binary operator to elements of the array in + sequence. For example: :: + + >>> np.add.reduce(np.arange(10)) # adds all elements of array + 45 + +For multidimensional arrays, the first dimension is reduced by default: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5)) + array([ 5, 7, 9, 11, 13]) + +The axis keyword can be used to specify different axes to reduce: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) + array([10, 35]) + +**.accumulate(arr)** applies the binary operator and generates an +equivalently shaped array that includes the accumulated amount for each +element of the array. A couple of examples: :: + + >>> np.add.accumulate(np.arange(10)) + array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) + >>> np.multiply.accumulate(np.arange(1,9)) + array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) + +The behavior for multidimensional arrays is the same as for .reduce(), +as is the use of the axis keyword. + +**.reduceat(arr,indices)** allows one to apply reduce to selected parts + of an array. It is a difficult method to understand. See its documentation + for details. + +**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and + arr2. It will work on multidimensional arrays (the shape of the result is + the concatenation of the two input shapes): :: + + >>> np.multiply.outer(np.arange(3),np.arange(4)) + array([[0, 0, 0, 0], + [0, 1, 2, 3], + [0, 2, 4, 6]]) + +Output arguments +================ + +All ufuncs accept an optional output array. The array must be of the expected +output shape. Beware that if the type of the output array is of a different +(and lower) type than the output result, the results may be silently truncated +or otherwise corrupted in the downcast to the lower type. This usage is useful +when one wants to avoid creating large temporary arrays and instead allows one +to reuse the same array memory repeatedly (at the expense of not being able to +use more convenient operator notation in expressions). Note that when the +output argument is used, the ufunc still returns a reference to the result. + + >>> x = np.arange(2) + >>> np.add(np.arange(2),np.arange(2.),x) + array([0, 2]) + >>> x + array([0, 2]) + +and & or as ufuncs +================== + +Invariably, people try to use the Python 'and' and 'or' as logical operators +(and quite understandably). But these operators do not behave as normal +operators since Python treats these quite differently. They cannot be +overloaded with array equivalents. Thus using 'and' or 'or' with an array +results in an error. There are two alternatives: + + 1) use the ufunc functions logical_and() and logical_or(). + 2) use the bitwise operators & and \|. The drawback of these is that if + the arguments to these operators are not boolean arrays, the result is + likely incorrect. On the other hand, most usages of logical_and and + logical_or are with boolean arrays. As long as one is careful, this is + a convenient way to apply these operators. + +""" diff --git a/phivenv/Lib/site-packages/numpy/dtypes.py b/phivenv/Lib/site-packages/numpy/dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..6906ae0984870557ad5c5094f07e8bfc250be512 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/dtypes.py @@ -0,0 +1,41 @@ +""" +This module is home to specific dtype-related functionality and the DType classes. +For more general information about dtypes, also see `numpy.dtype` and +:ref:`arrays.dtypes`. 
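+ +A quick illustration (an editor's sketch, not upstream text; it assumes NumPy >= 1.25, where this module and its classes exist): :: + + >>> import numpy as np + >>> isinstance(np.dtype("float64"), np.dtypes.Float64DType) + True + >>> np.dtypes.Float64DType() == np.dtype("float64") + True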
+ +Similar to the builtin ``types`` module, this submodule defines types (classes) +that are not widely used directly. + +.. versionadded:: NumPy 1.25 + + The dtypes module is new in NumPy 1.25. Previously DType classes were + only accessible indirectly. + + +DType classes +------------- + +The following are the classes of the corresponding NumPy dtype instances and +NumPy scalar types. The classes can be used in ``isinstance`` checks and can +also be instantiated or used directly. Direct use of these classes is not +typical, since their scalar counterparts (e.g. ``np.float64``) or strings +like ``"float64"`` can be used. +""" + +# See doc/source/reference/routines.dtypes.rst for module-level docs + +__all__ = [] + + +def _add_dtype_helper(DType, alias): + # Function to add DTypes a bit more conveniently without channeling them + # through `numpy._core._multiarray_umath` namespace or similar. + from numpy import dtypes + + setattr(dtypes, DType.__name__, DType) + __all__.append(DType.__name__) + + if alias: + alias = alias.removeprefix("numpy.dtypes.") + setattr(dtypes, alias, DType) + __all__.append(alias) diff --git a/phivenv/Lib/site-packages/numpy/dtypes.pyi b/phivenv/Lib/site-packages/numpy/dtypes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b5f385c554196a1596afd018e7217f496e2a84a7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/dtypes.pyi @@ -0,0 +1,43 @@ +import numpy as np + + +__all__: list[str] + +# Boolean: +BoolDType = np.dtype[np.bool] +# Sized integers: +Int8DType = np.dtype[np.int8] +UInt8DType = np.dtype[np.uint8] +Int16DType = np.dtype[np.int16] +UInt16DType = np.dtype[np.uint16] +Int32DType = np.dtype[np.int32] +UInt32DType = np.dtype[np.uint32] +Int64DType = np.dtype[np.int64] +UInt64DType = np.dtype[np.uint64] +# Standard C-named version/alias: +ByteDType = np.dtype[np.byte] +UByteDType = np.dtype[np.ubyte] +ShortDType = np.dtype[np.short] +UShortDType = np.dtype[np.ushort] +IntDType = np.dtype[np.intc] +UIntDType = np.dtype[np.uintc] +LongDType = np.dtype[np.long] +ULongDType = np.dtype[np.ulong] +LongLongDType = np.dtype[np.longlong] +ULongLongDType = np.dtype[np.ulonglong] +# Floats +Float16DType = np.dtype[np.float16] +Float32DType = np.dtype[np.float32] +Float64DType = np.dtype[np.float64] +LongDoubleDType = np.dtype[np.longdouble] +# Complex: +Complex64DType = np.dtype[np.complex64] +Complex128DType = np.dtype[np.complex128] +CLongDoubleDType = np.dtype[np.clongdouble] +# Others: +ObjectDType = np.dtype[np.object_] +BytesDType = np.dtype[np.bytes_] +StrDType = np.dtype[np.str_] +VoidDType = np.dtype[np.void] +DateTime64DType = np.dtype[np.datetime64] +TimeDelta64DType = np.dtype[np.timedelta64] diff --git a/phivenv/Lib/site-packages/numpy/exceptions.py b/phivenv/Lib/site-packages/numpy/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..28b44b3fda435d697837613a4e659b896277663c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/exceptions.py @@ -0,0 +1,243 @@ +""" +Exceptions and Warnings (:mod:`numpy.exceptions`) +================================================= + +General exceptions used by NumPy. Note that some exceptions may be module +specific, such as linear algebra errors. + +.. versionadded:: NumPy 1.25 + + The exceptions module is new in NumPy 1.25. Older exceptions remain + available through the main NumPy namespace for compatibility. + +.. currentmodule:: numpy.exceptions + +Warnings +-------- +.. autosummary:: + :toctree: generated/ + + ComplexWarning Given when converting complex to real. 
+ VisibleDeprecationWarning Same as a DeprecationWarning, but more visible. + RankWarning Issued when the design matrix is rank deficient. + +Exceptions +---------- +.. autosummary:: + :toctree: generated/ + + AxisError Given when an axis was invalid. + DTypePromotionError Given when no common dtype could be found. + TooHardError Error specific to `numpy.shares_memory`. + +""" + + +__all__ = [ + "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning", + "TooHardError", "AxisError", "DTypePromotionError"] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. + + """ + pass + + +class ModuleDeprecationWarning(DeprecationWarning): + """Module deprecation warning. + + .. warning:: + + This warning should not be used, since nose testing is not relevant + anymore. + + The nose tester turns ordinary Deprecation warnings into test failures. + That makes it hard to deprecate whole modules, because they get + imported by default. So this is a special Deprecation warning that the + nose tester will let pass without making tests fail. + + """ + pass + + +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """ + pass + + +class RankWarning(RuntimeWarning): + """Matrix rank warning. + + Issued by polynomial functions when the design matrix is rank deficient. + + """ + pass + + +# Exception used in shares_memory() +class TooHardError(RuntimeError): + """max_work was exceeded. + + This is raised whenever the maximum number of candidate solutions + to consider specified by the ``max_work`` parameter is exceeded. + Assigning a finite number to max_work may have caused the operation + to fail. + + """ + pass + + +class AxisError(ValueError, IndexError): + """Axis supplied was invalid. + + This is raised whenever an ``axis`` parameter is specified that is larger + than the number of array dimensions. + For compatibility with code written against older numpy versions, which + raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this + situation, this exception subclasses both to ensure that + ``except ValueError`` and ``except IndexError`` statements continue + to catch ``AxisError``. + + .. versionadded:: 1.13 + + Parameters + ---------- + axis : int or str + The out of bounds axis or a custom exception message. + If an axis is provided, then `ndim` should be specified as well. + ndim : int, optional + The number of array dimensions. + msg_prefix : str, optional + A prefix for the exception message. + + Attributes + ---------- + axis : int, optional + The out of bounds axis or ``None`` if a custom exception + message was provided. This should be the axis as passed by + the user, before any normalization to resolve negative indices. + + .. versionadded:: 1.22 + ndim : int, optional + The number of array dimensions or ``None`` if a custom exception + message was provided. + + .. 
versionadded:: 1.22 + + + Examples + -------- + >>> array_1d = np.arange(10) + >>> np.cumsum(array_1d, axis=1) + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1 + + Negative axes are preserved: + + >>> np.cumsum(array_1d, axis=-2) + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1 + + The class constructor generally takes the axis and arrays' + dimensionality as arguments: + + >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error')) + error: axis 2 is out of bounds for array of dimension 1 + + Alternatively, a custom exception message can be passed: + + >>> print(np.exceptions.AxisError('Custom error message')) + Custom error message + + """ + + __slots__ = ("axis", "ndim", "_msg") + + def __init__(self, axis, ndim=None, msg_prefix=None): + if ndim is msg_prefix is None: + # single-argument form: directly set the error message + self._msg = axis + self.axis = None + self.ndim = None + else: + self._msg = msg_prefix + self.axis = axis + self.ndim = ndim + + def __str__(self): + axis = self.axis + ndim = self.ndim + + if axis is ndim is None: + return self._msg + else: + msg = f"axis {axis} is out of bounds for array of dimension {ndim}" + if self._msg is not None: + msg = f"{self._msg}: {msg}" + return msg + + +class DTypePromotionError(TypeError): + """Multiple DTypes could not be converted to a common one. + + This exception derives from ``TypeError`` and is raised whenever dtypes + cannot be converted to a single common one. This can be because they + are of a different category/class or incompatible instances of the same + one (see Examples). + + Notes + ----- + Many functions will use promotion to find the correct result and + implementation. For these functions the error will typically be chained + with a more specific error indicating that no implementation was found + for the input dtypes. + + Typically promotion should be considered "invalid" between the dtypes of + two arrays when `arr1 == arr2` can safely return all ``False`` because the + dtypes are fundamentally different. + + Examples + -------- + Datetimes and complex numbers are incompatible classes and cannot be + promoted: + + >>> np.result_type(np.dtype("M8[s]"), np.complex128) + DTypePromotionError: The DType <class 'numpy.dtypes.DateTime64DType'> could not + be promoted by <class 'numpy.dtypes.Complex128DType'>. This means that no common + DType exists for the given inputs. For example they cannot be stored in a + single array unless the dtype is `object`. The full list of DTypes is: + (<class 'numpy.dtypes.DateTime64DType'>, <class 'numpy.dtypes.Complex128DType'>) + + For example, for structured dtypes the structure can mismatch, and the + same ``DTypePromotionError`` is raised when two structured dtypes with + a mismatch in their number of fields are promoted: + + >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)]) + >>> dtype2 = np.dtype([("field1", np.float64)]) + >>> np.promote_types(dtype1, dtype2) + DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` + mismatch. + + """ + pass diff --git a/phivenv/Lib/site-packages/numpy/exceptions.pyi b/phivenv/Lib/site-packages/numpy/exceptions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9717f0cc7c940892459fcf6ac25557d57561aa5b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/exceptions.pyi @@ -0,0 +1,19 @@ +from typing import overload + +__all__: list[str] + +class ComplexWarning(RuntimeWarning): ... +class ModuleDeprecationWarning(DeprecationWarning): ... +class VisibleDeprecationWarning(UserWarning): ... 
+class RankWarning(RuntimeWarning): ... +class TooHardError(RuntimeError): ... +class DTypePromotionError(TypeError): ... + +class AxisError(ValueError, IndexError): + axis: None | int + ndim: None | int + @overload + def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + @overload + def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... + def __str__(self) -> str: ... diff --git a/phivenv/Lib/site-packages/numpy/f2py/__init__.py b/phivenv/Lib/site-packages/numpy/f2py/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..614953719354bc7311717f2cbb549a0a5c9e903c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/__init__.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +"""Fortran to Python Interface Generator. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the terms +of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__all__ = ['run_main', 'get_include'] + +import sys +import subprocess +import os +import warnings + +from numpy.exceptions import VisibleDeprecationWarning +from . import f2py2e +from . import diagnose + +run_main = f2py2e.run_main +main = f2py2e.main + + +def get_include(): + """ + Return the directory that contains the ``fortranobject.c`` and ``.h`` files. + + .. note:: + + This function is not needed when building an extension with + `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files + in one go. + + Python extension modules built with f2py-generated code need to use + ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` + header. This function can be used to obtain the directory containing + both of these files. + + Returns + ------- + include_path : str + Absolute path to the directory containing ``fortranobject.c`` and + ``fortranobject.h``. + + Notes + ----- + .. versionadded:: 1.21.1 + + Unless the build system you are using has specific support for f2py, + building a Python extension using a ``.pyf`` signature file is a two-step + process. For a module ``mymod``: + + * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This + generates ``mymodmodule.c`` and (if needed) + ``mymod-f2pywrappers.f`` files next to ``mymod.pyf``. + * Step 2: build your Python extension module. 
This requires the + following source files: + + * ``mymodmodule.c`` + * ``mymod-f2pywrappers.f`` (if it was generated in Step 1) + * ``fortranobject.c`` + + See Also + -------- + numpy.get_include : function that returns the numpy include directory + + """ + return os.path.join(os.path.dirname(__file__), 'src') + + +def __getattr__(attr): + + # Avoid importing things that aren't needed for building + # which might import the main numpy module + if attr == "test": + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + return test + + else: + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + + +def __dir__(): + return list(globals().keys() | {"test"}) diff --git a/phivenv/Lib/site-packages/numpy/f2py/__init__.pyi b/phivenv/Lib/site-packages/numpy/f2py/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2d2c2434c000fb519b9de1a37f99a604aef8f004 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/__init__.pyi @@ -0,0 +1,42 @@ +import os +import subprocess +from collections.abc import Iterable +from typing import Literal as L, Any, overload, TypedDict + +from numpy._pytesttester import PytestTester + +class _F2PyDictBase(TypedDict): + csrc: list[str] + h: list[str] + +class _F2PyDict(_F2PyDictBase, total=False): + fsrc: list[str] + ltx: list[str] + +__all__: list[str] +test: PytestTester + +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... + +@overload +def compile( # type: ignore[misc] + source: str | bytes, + modulename: str = ..., + extra_args: str | list[str] = ..., + verbose: bool = ..., + source_fn: None | str | bytes | os.PathLike[Any] = ..., + extension: L[".f", ".f90"] = ..., + full_output: L[False] = ..., +) -> int: ... +@overload +def compile( + source: str | bytes, + modulename: str = ..., + extra_args: str | list[str] = ..., + verbose: bool = ..., + source_fn: None | str | bytes | os.PathLike[Any] = ..., + extension: L[".f", ".f90"] = ..., + full_output: L[True] = ..., +) -> subprocess.CompletedProcess[bytes]: ... + +def get_include() -> str: ... 
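[Editor's note: the two-step workflow that ``get_include()`` documents above can be sketched as follows. This is an illustration only, not part of the diff; the module name ``mymod`` and the file layout are hypothetical, and the snippet merely assembles the inputs a build script would hand to its compile step.]

import os
import numpy.f2py

# Step 1 (run beforehand): python -m numpy.f2py mymod.pyf --quiet
inc = numpy.f2py.get_include()            # directory holding fortranobject.{c,h}
sources = [
    "mymodmodule.c",                      # generated by Step 1
    "mymod-f2pywrappers.f",               # generated by Step 1, if needed
    os.path.join(inc, "fortranobject.c"), # shipped with numpy.f2py
]
include_dirs = [inc]                      # so fortranobject.h can be found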
diff --git a/phivenv/Lib/site-packages/numpy/f2py/__main__.py b/phivenv/Lib/site-packages/numpy/f2py/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..19ced88500139d62fc99f027bc467b07ee59bb26 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/__main__.py @@ -0,0 +1,5 @@ +# See: +# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +from numpy.f2py.f2py2e import main + +main() diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bc5f02bc3b1515a0caaeed64d79162b36362dbf Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__main__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c08ef1b7fc59d74e6f699936400016a2e3e3049 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__main__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__version__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__version__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0d89767dcbfbe18af157c4ad98443c040e8a91a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/__version__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c1a2290f946e1cab2334a05b560458745167acb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/_isocbind.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54132469f918dae8c555caca4c86e10b626ccf33 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/_src_pyf.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..274e0fe43fc044696d337772d39bb0e384cb47cf Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7070f0580532e3dd3365d5c7b6c6f43a3d8ea396 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01f07c86b9f5972fe04fd81d0621ba6a09152611 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1115a7e4918cda3c9abbf1c42b555d199b72e9e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/common_rules.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/common_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8106bd8530bfbf7983cf97b2595f4bcb8bb76707 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/common_rules.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68a438e71c31cafc86ddf06ccc5a24ae1df4dc03 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/diagnose.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/diagnose.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fe3c50c3b1ca2b523769d0fd87c6d5c3fd04480 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/diagnose.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0767d090b1e66dca99623a62e5d177ae9e299eb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0533feb0d94efaddc3bb6692fc736898a13f96b0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/func2subr.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/func2subr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e69d64490ce40f39a79ee19f7ce68f11212fd0ab Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/func2subr.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/rules.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf11ab02c63f4b6cdfb4175b918f65409477b287 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/rules.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/symbolic.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/symbolic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..796bb56023731d9f949279cece078541b77b265f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/symbolic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__pycache__/use_rules.cpython-39.pyc 
b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/use_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8527608b84eaa8ab30ddc2f1be492ac7c543e61a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/__pycache__/use_rules.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/__version__.py b/phivenv/Lib/site-packages/numpy/f2py/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..8813675308034731efa254fb5d1d13136a8477c2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/__version__.py @@ -0,0 +1 @@ +from numpy.version import version diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/__init__.py b/phivenv/Lib/site-packages/numpy/f2py/_backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e84da4d1c8ac5a6bfd15bd67a302e8f937c36224 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_backends/__init__.py @@ -0,0 +1,9 @@ +def f2py_build_generator(name): + if name == "meson": + from ._meson import MesonBackend + return MesonBackend + elif name == "distutils": + from ._distutils import DistutilsBackend + return DistutilsBackend + else: + raise ValueError(f"Unknown backend: {name}") diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d1cfee58e8de878e901798062f641af67a23c8d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f40447caa29368ad07524622ce959e95d37678e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_backend.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cd65acc08af3a4fb78ae655099be695bc709db6 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_distutils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67395bf7b70a3ac8d41bde2d72bc2ae33eac9320 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/_backends/__pycache__/_meson.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/_backend.py b/phivenv/Lib/site-packages/numpy/f2py/_backends/_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..dd16e8762671de742336df2e1e185fece92246fe --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_backends/_backend.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod + + +class Backend(ABC): + def __init__( + self, + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + 
setup_flags, + remove_build_dir, + extra_dat, + ): + self.modulename = modulename + self.sources = sources + self.extra_objects = extra_objects + self.build_dir = build_dir + self.include_dirs = include_dirs + self.library_dirs = library_dirs + self.libraries = libraries + self.define_macros = define_macros + self.undef_macros = undef_macros + self.f2py_flags = f2py_flags + self.sysinfo_flags = sysinfo_flags + self.fc_flags = fc_flags + self.flib_flags = flib_flags + self.setup_flags = setup_flags + self.remove_build_dir = remove_build_dir + self.extra_dat = extra_dat + + @abstractmethod + def compile(self) -> None: + """Compile the wrapper.""" + pass diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/_distutils.py b/phivenv/Lib/site-packages/numpy/f2py/_backends/_distutils.py new file mode 100644 index 0000000000000000000000000000000000000000..b9d169ec81727809470df78efec9c5fe3140d37f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_backends/_distutils.py @@ -0,0 +1,75 @@ +from ._backend import Backend + +from numpy.distutils.core import setup, Extension +from numpy.distutils.system_info import get_info +from numpy.distutils.misc_util import dict_append +from numpy.exceptions import VisibleDeprecationWarning +import os +import sys +import shutil +import warnings + + +class DistutilsBackend(Backend): + def __init__(self, *args, **kwargs): + warnings.warn( + "\ndistutils has been deprecated since NumPy 1.26.x\n" + "Use the Meson backend instead, or generate wrappers" + " without -c and use a custom build script", + VisibleDeprecationWarning, + stacklevel=2, + ) + super().__init__(*args, **kwargs) + + def compile(self): + num_info = {} + if num_info: + self.include_dirs.extend(num_info.get("include_dirs", [])) + ext_args = { + "name": self.modulename, + "sources": self.sources, + "include_dirs": self.include_dirs, + "library_dirs": self.library_dirs, + "libraries": self.libraries, + "define_macros": self.define_macros, + "undef_macros": self.undef_macros, + "extra_objects": self.extra_objects, + "f2py_options": self.f2py_flags, + } + + if self.sysinfo_flags: + for n in self.sysinfo_flags: + i = get_info(n) + if not i: + print( + f"No {repr(n)} resources found " + "in system (try `f2py --help-link`)" + ) + dict_append(ext_args, **i) + + ext = Extension(**ext_args) + + sys.argv = [sys.argv[0]] + self.setup_flags + sys.argv.extend( + [ + "build", + "--build-temp", + self.build_dir, + "--build-base", + self.build_dir, + "--build-platlib", + ".", + "--disable-optimization", + ] + ) + + if self.fc_flags: + sys.argv.extend(["config_fc"] + self.fc_flags) + if self.flib_flags: + sys.argv.extend(["build_ext"] + self.flib_flags) + + setup(ext_modules=[ext]) + + if self.remove_build_dir and os.path.exists(self.build_dir): + print(f"Removing build directory {self.build_dir}") + shutil.rmtree(self.build_dir) diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/_meson.py b/phivenv/Lib/site-packages/numpy/f2py/_backends/_meson.py new file mode 100644 index 0000000000000000000000000000000000000000..df2b7b43b25c79a02a5ee732bbccc1026ba4a345 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_backends/_meson.py @@ -0,0 +1,234 @@ +from __future__ import annotations + +import os +import errno +import shutil +import subprocess +import sys +import re +from pathlib import Path + +from ._backend import Backend +from string import Template +from itertools import chain + +import warnings + + +class MesonTemplate: + """Template meson build file generation class.""" + + def __init__( + self, 
modulename: str, + sources: list[Path], + deps: list[str], + libraries: list[str], + library_dirs: list[Path], + include_dirs: list[Path], + object_files: list[Path], + linker_args: list[str], + fortran_args: list[str], + build_type: str, + python_exe: str, + ): + self.modulename = modulename + self.build_template_path = ( + Path(__file__).parent.absolute() / "meson.build.template" + ) + self.sources = sources + self.deps = deps + self.libraries = libraries + self.library_dirs = library_dirs + if include_dirs is not None: + self.include_dirs = include_dirs + else: + self.include_dirs = [] + self.substitutions = {} + self.objects = object_files + # Convert args to '' wrapped variant for meson + self.fortran_args = [ + f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x + for x in fortran_args + ] + self.pipeline = [ + self.initialize_template, + self.sources_substitution, + self.deps_substitution, + self.include_substitution, + self.libraries_substitution, + self.fortran_args_substitution, + ] + self.build_type = build_type + self.python_exe = python_exe + self.indent = " " * 21 + + def meson_build_template(self) -> str: + if not self.build_template_path.is_file(): + raise FileNotFoundError( + errno.ENOENT, + "Meson build template" + f" {self.build_template_path.absolute()}" + " does not exist.", + ) + return self.build_template_path.read_text() + + def initialize_template(self) -> None: + self.substitutions["modulename"] = self.modulename + self.substitutions["buildtype"] = self.build_type + self.substitutions["python"] = self.python_exe + + def sources_substitution(self) -> None: + self.substitutions["source_list"] = ",\n".join( + [f"{self.indent}'''{source}'''," for source in self.sources] + ) + + def deps_substitution(self) -> None: + self.substitutions["dep_list"] = f",\n{self.indent}".join( + [f"{self.indent}dependency('{dep}')," for dep in self.deps] + ) + + def libraries_substitution(self) -> None: + self.substitutions["lib_dir_declarations"] = "\n".join( + [ + f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])" + for i, lib_dir in enumerate(self.library_dirs) + ] + ) + + self.substitutions["lib_declarations"] = "\n".join( + [ + f"{lib.replace('.','_')} = declare_dependency(link_args : ['-l{lib}'])" + for lib in self.libraries + ] + ) + + self.substitutions["lib_list"] = f"\n{self.indent}".join( + [f"{self.indent}{lib.replace('.','_')}," for lib in self.libraries] + ) + self.substitutions["lib_dir_list"] = f"\n{self.indent}".join( + [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))] + ) + + def include_substitution(self) -> None: + self.substitutions["inc_list"] = f",\n{self.indent}".join( + [f"{self.indent}'''{inc}'''," for inc in self.include_dirs] + ) + + def fortran_args_substitution(self) -> None: + if self.fortran_args: + self.substitutions["fortran_args"] = ( + f"{self.indent}fortran_args: [{', '.join([arg for arg in self.fortran_args])}]," + ) + else: + self.substitutions["fortran_args"] = "" + + def generate_meson_build(self): + for node in self.pipeline: + node() + template = Template(self.meson_build_template()) + meson_build = template.substitute(self.substitutions) + meson_build = re.sub(r",,", ",", meson_build) + return meson_build + + +class MesonBackend(Backend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dependencies = self.extra_dat.get("dependencies", []) + self.meson_build_dir = "bbdir" + self.build_type = ( + "debug" if any("debug" in flag for flag in self.fc_flags) else 
"release" + ) + self.fc_flags = _get_flags(self.fc_flags) + + def _move_exec_to_root(self, build_dir: Path): + walk_dir = Path(build_dir) / self.meson_build_dir + path_objects = chain( + walk_dir.glob(f"{self.modulename}*.so"), + walk_dir.glob(f"{self.modulename}*.pyd"), + ) + # Same behavior as distutils + # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293 + for path_object in path_objects: + dest_path = Path.cwd() / path_object.name + if dest_path.exists(): + dest_path.unlink() + shutil.copy2(path_object, dest_path) + os.remove(path_object) + + def write_meson_build(self, build_dir: Path) -> None: + """Writes the meson build file at specified location""" + meson_template = MesonTemplate( + self.modulename, + self.sources, + self.dependencies, + self.libraries, + self.library_dirs, + self.include_dirs, + self.extra_objects, + self.flib_flags, + self.fc_flags, + self.build_type, + sys.executable, + ) + src = meson_template.generate_meson_build() + Path(build_dir).mkdir(parents=True, exist_ok=True) + meson_build_file = Path(build_dir) / "meson.build" + meson_build_file.write_text(src) + return meson_build_file + + def _run_subprocess_command(self, command, cwd): + subprocess.run(command, cwd=cwd, check=True) + + def run_meson(self, build_dir: Path): + setup_command = ["meson", "setup", self.meson_build_dir] + self._run_subprocess_command(setup_command, build_dir) + compile_command = ["meson", "compile", "-C", self.meson_build_dir] + self._run_subprocess_command(compile_command, build_dir) + + def compile(self) -> None: + self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + self._move_exec_to_root(self.build_dir) + + +def _prepare_sources(mname, sources, bdir): + extended_sources = sources.copy() + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy sources + for source in sources: + if Path(source).exists() and Path(source).is_file(): + shutil.copy(source, bdir) + generated_sources = [ + Path(f"{mname}module.c"), + Path(f"{mname}-f2pywrappers2.f90"), + Path(f"{mname}-f2pywrappers.f"), + ] + bdir = Path(bdir) + for generated_source in generated_sources: + if generated_source.exists(): + shutil.copy(generated_source, bdir / generated_source.name) + extended_sources.append(generated_source.name) + generated_source.unlink() + extended_sources = [ + Path(source).name + for source in extended_sources + if not Path(source).suffix == ".pyf" + ] + return extended_sources + + +def _get_flags(fc_flags): + flag_values = [] + flag_pattern = re.compile(r"--f(77|90)flags=(.*)") + for flag in fc_flags: + match_result = flag_pattern.match(flag) + if match_result: + values = match_result.group(2).strip().split() + values = [val.strip("'\"") for val in values] + flag_values.extend(values) + # Hacky way to preserve order of flags + unique_flags = list(dict.fromkeys(flag_values)) + return unique_flags diff --git a/phivenv/Lib/site-packages/numpy/f2py/_backends/meson.build.template b/phivenv/Lib/site-packages/numpy/f2py/_backends/meson.build.template new file mode 100644 index 0000000000000000000000000000000000000000..0be7c17e1f13eb229a1b65e0903faa1e19524636 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_backends/meson.build.template @@ -0,0 +1,55 @@ +project('${modulename}', + ['c', 'fortran'], + version : '0.1', + meson_version: '>= 1.1.0', + default_options : [ + 'warning_level=1', + 'buildtype=${buildtype}' + ]) +fc = meson.get_compiler('fortran') + +py = 
import('python').find_installation('''${python}''', pure: false) +py_dep = py.dependency() + +incdir_numpy = run_command(py, + ['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'], + check : true +).stdout().strip() + +incdir_f2py = run_command(py, + ['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'], + check : true +).stdout().strip() + +inc_np = include_directories(incdir_numpy) +np_dep = declare_dependency(include_directories: inc_np) + +incdir_f2py = incdir_numpy / '..' / '..' / 'f2py' / 'src' +inc_f2py = include_directories(incdir_f2py) +fortranobject_c = incdir_f2py / 'fortranobject.c' + +inc_np = include_directories(incdir_numpy, incdir_f2py) +# gh-25000 +quadmath_dep = fc.find_library('quadmath', required: false) + +${lib_declarations} +${lib_dir_declarations} + +py.extension_module('${modulename}', + [ +${source_list}, + fortranobject_c + ], + include_directories: [ + inc_np, +${inc_list} + ], + dependencies : [ + py_dep, + quadmath_dep, +${dep_list} +${lib_list} +${lib_dir_list} + ], +${fortran_args} + install : true) diff --git a/phivenv/Lib/site-packages/numpy/f2py/_isocbind.py b/phivenv/Lib/site-packages/numpy/f2py/_isocbind.py new file mode 100644 index 0000000000000000000000000000000000000000..4a1399588bc9386219acff9ad7a483e27ea153f9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_isocbind.py @@ -0,0 +1,62 @@ +""" +ISO_C_BINDING maps for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
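+
+For example (an illustrative note, not part of the original sources), a
+dummy argument declared as
+
+    use iso_c_binding, only: c_int32_t
+    integer(c_int32_t) :: n
+
+is wrapped as a plain C `int`, via the forced-cast tables below.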
+""" +# These map to keys in c2py_map, via forced casting for now, see gh-25229 +iso_c_binding_map = { + 'integer': { + 'c_int': 'int', + 'c_short': 'short', # 'short' <=> 'int' for now + 'c_long': 'long', # 'long' <=> 'int' for now + 'c_long_long': 'long_long', + 'c_signed_char': 'signed_char', + 'c_size_t': 'unsigned', # size_t <=> 'unsigned' for now + 'c_int8_t': 'signed_char', # int8_t <=> 'signed_char' for now + 'c_int16_t': 'short', # int16_t <=> 'short' for now + 'c_int32_t': 'int', # int32_t <=> 'int' for now + 'c_int64_t': 'long_long', + 'c_int_least8_t': 'signed_char', # int_least8_t <=> 'signed_char' for now + 'c_int_least16_t': 'short', # int_least16_t <=> 'short' for now + 'c_int_least32_t': 'int', # int_least32_t <=> 'int' for now + 'c_int_least64_t': 'long_long', + 'c_int_fast8_t': 'signed_char', # int_fast8_t <=> 'signed_char' for now + 'c_int_fast16_t': 'short', # int_fast16_t <=> 'short' for now + 'c_int_fast32_t': 'int', # int_fast32_t <=> 'int' for now + 'c_int_fast64_t': 'long_long', + 'c_intmax_t': 'long_long', # intmax_t <=> 'long_long' for now + 'c_intptr_t': 'long', # intptr_t <=> 'long' for now + 'c_ptrdiff_t': 'long', # ptrdiff_t <=> 'long' for now + }, + 'real': { + 'c_float': 'float', + 'c_double': 'double', + 'c_long_double': 'long_double' + }, + 'complex': { + 'c_float_complex': 'complex_float', + 'c_double_complex': 'complex_double', + 'c_long_double_complex': 'complex_long_double' + }, + 'logical': { + 'c_bool': 'unsigned_char' # _Bool <=> 'unsigned_char' for now + }, + 'character': { + 'c_char': 'char' + } +} + +# TODO: See gh-25229 +isoc_c2pycode_map = {} +iso_c2py_map = {} + +isoc_kindmap = {} +for fortran_type, c_type_dict in iso_c_binding_map.items(): + for c_type in c_type_dict.keys(): + isoc_kindmap[c_type] = fortran_type diff --git a/phivenv/Lib/site-packages/numpy/f2py/_src_pyf.py b/phivenv/Lib/site-packages/numpy/f2py/_src_pyf.py new file mode 100644 index 0000000000000000000000000000000000000000..8c9c0861d0c73010a7f6b7af55b3175eae8978d9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/_src_pyf.py @@ -0,0 +1,239 @@ +import re + +# START OF CODE VENDORED FROM `numpy.distutils.from_template` +############################################################# +""" +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '

' will be replaced with + 'd', 's', 'z', and 'c' for each replicate of the block. + + <_c> is already defined: <_c=s,d,c,z> + <_t> is already defined: <_t=real,double precision,complex,double complex> + + short: + , a short form of the named, useful when no

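+
+  As an illustrative sketch (this example is not part of the original
+  documentation), a templated block such as
+
+      subroutine <_c>copy(n, x, y)
+      <_t>, dimension(n) :: x, y
+      end subroutine <_c>copy
+
+  is replicated into four subroutines `scopy`, `dcopy`, `ccopy` and
+  `zcopy`, with `<_t>` expanding to `real`, `double precision`,
+  `complex` and `double complex` respectively.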
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expression must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by <index>-th expression.
+
+  Note that all '<..>' forms in a block must have the same number of
+  comma-separated entries.
+
+  Predefined named template rules:
+  <prefix=s,d,c,z>
+  <ftype=real,double precision,complex,double complex>
+  <ftypereal=real,double precision>
+  <ctype=float,double,complex_float,complex_double>
+  <ctypereal=float,double>
+"""
+
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+    """ Return a list of tuples for each function or subroutine each
+    tuple is the start and end of a subroutine or function to be
+    expanded.
+    """
+
+    spanlist = []
+    ind = 0
+    while True:
+        m = routine_start_re.search(astr, ind)
+        if m is None:
+            break
+        start = m.start()
+        if function_start_re.match(astr, start, m.end()):
+            while True:
+                i = astr.rfind('\n', ind, start)
+                if i==-1:
+                    break
+                start = i
+                if astr[i:i+7]!='\n     $':
+                    break
+            start += 1
+        m = routine_end_re.search(astr, m.end())
+        ind = end = m and m.end()-1 or len(astr)
+        spanlist.append((start, end))
+    return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+    reps = named_re.findall(astr)
+    names = {}
+    for rep in reps:
+        name = rep[0].strip() or unique_key(names)
+        repl = rep[1].replace(r'\,', '@comma@')
+        thelist = conv(repl)
+        names[name] = thelist
+    return names
+
+def find_and_remove_repl_patterns(astr):
+    names = find_repl_patterns(astr)
+    astr = re.subn(named_re, '', astr)[0]
+    return astr, names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+    b = astr.split(',')
+    l = [x.strip() for x in b]
+    for i in range(len(l)):
+        m = item_re.match(l[i])
+        if m:
+            j = int(m.group('index'))
+            l[i] = l[j]
+    return ','.join(l)
+
+def unique_key(adict):
+    """ Obtain a unique key given a dictionary."""
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = '__l%s' % (n)
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+    substr = substr.replace(r'\>', '@rightarrow@')
+    substr = substr.replace(r'\<', '@leftarrow@')
+    lnames = find_repl_patterns(substr)
+    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
+
+    def listrepl(mobj):
+        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+        if template_name_re.match(thelist):
+            return "<%s>" % (thelist)
+        name = None
+        for key in lnames.keys():    # see if list is already in dictionary
+            if lnames[key] == thelist:
+                name = key
+        if name is None:      # this list is not in the dictionary yet
+            name = unique_key(lnames)
+            lnames[name] = thelist
+        return "<%s>" % name
+
+    substr = list_re.sub(listrepl, substr)  # convert all lists to named templates
+    # newnames are constructed as needed
+
+    numsubs = None
+    base_rule = None
+    rules = {}
+    for r in template_re.findall(substr):
+        if r not in rules:
+            thelist = lnames.get(r, names.get(r, None))
+            if thelist is None:
+                raise ValueError('No replicates found for <%s>' % (r))
+            if r not in names and not thelist.startswith('_'):
+                names[r] = thelist
+            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+            num = len(rule)
+
+            if numsubs is None:
+                numsubs = num
+                rules[r] = rule
+                base_rule = r
+            elif num == numsubs:
+                rules[r] = rule
+            else:
+                print("Mismatch in number of replacements (base <{}={}>) "
+                      "for <{}={}>. Ignoring.".format(base_rule, ','.join(rules[base_rule]), r, thelist))
+    if not rules:
+        return substr
+
+    def namerepl(mobj):
+        name = mobj.group(1)
+        return rules.get(name, (k+1)*[name])[k]
+
+    newstr = ''
+    for k in range(numsubs):
+        newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+    newstr = newstr.replace('@rightarrow@', '>')
+    newstr = newstr.replace('@leftarrow@', '<')
+    return newstr
+
+def process_str(allstr):
+    newstr = allstr
+    writestr = ''
+
+    struct = parse_structure(newstr)
+
+    oldend = 0
+    names = {}
+    names.update(_special_names)
+    for sub in struct:
+        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+        writestr += cleanedstr
+        names.update(defs)
+        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+        oldend = sub[1]
+    writestr += newstr[oldend:]
+
+    return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn = os.path.join(d, fn)
+                if os.path.isfile(fn):
+                    lines.extend(resolve_includes(fn))
+                else:
+                    lines.append(line)
+            else:
+                lines.append(line)
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ctype=float,double,complex_float,complex_double>
+<ftypereal=real,double precision>
+<ctypereal=float,double>
+''')
+
+# END OF CODE VENDORED FROM `numpy.distutils.from_template`
+###########################################################
diff --git a/phivenv/Lib/site-packages/numpy/f2py/auxfuncs.py b/phivenv/Lib/site-packages/numpy/f2py/auxfuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dfad29059d55ec64734635625f28307c4cfc5ab
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/f2py/auxfuncs.py
@@ -0,0 +1,988 @@
+"""
+Auxiliary functions for f2py2e.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) LICENSE.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+import pprint
+import sys
+import re
+import types
+from functools import reduce
+from copy import deepcopy
+
+from . import __version__
+from . 
import cfuncs +from .cfuncs import errmess + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', + 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', + 'iscomplex', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', + 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', + 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', + 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', + 'isunsigned_chararray', 'isunsigned_long_long', + 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', + 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', + 'getuseblocks', 'process_f2cmap_dict' +] + + +f2py_version = __version__.version + + +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _ischaracter(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def ischaracter_or_characterarray(var): + return _ischaracter(var) and 'charselector' not in var + + +def ischaracter(var): + return ischaracter_or_characterarray(var) and not isarray(var) + + +def ischaracterarray(var): + return ischaracter_or_characterarray(var) and isarray(var) + + +def isstring_or_stringarray(var): + return _ischaracter(var) and 'charselector' in var + + +def isstring(var): + return isstring_or_stringarray(var) and not isarray(var) + + +def isstringarray(var): + return isstring_or_stringarray(var) and isarray(var) + + +def isarrayofstrings(var): # obsolete? + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. 
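+    # Clarifying note (not in the original source): only string arrays
+    # whose last declared dimension is the assumed-size marker '(*)' are
+    # reported here.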
+ return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def isint1(var): + return var.get('typespec') == 'integer' \ + and get_kind(var) == '1' and not isarray(var) + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def 
ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 'function' == rout['block'] + + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + +def isattr_value(var): + return 'value' in var.get('attrspec', []) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. 
+ When using GNU gcc/g77 compilers, codes should work + correctly for callbacks with: + f2py -c -DF2PY_CB_RETURNCOMPLEX + **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def getdimension(var): + dimpattern = r"\((.*?)\)" + if 'attrspec' in var.keys(): + if any('dimension' in s for s in var['attrspec']): + return [re.findall(dimpattern, v) for v in var['attrspec']][0] + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not 
isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if containscommon(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error: + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + raise F2PYError(mess) + + +def l_and(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l1, ' and '.join(l2))) + + +def l_or(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l1, ' or '.join(l2))) + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess('Failed to use fortranname from %s\n' % + (rout['f2pyenhancements'])) + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess("%s multiline block should end with `'''`: %s\n" + % (blockname, repr(r))) + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + else: + if not isattr_value(var): + ctype = ctype + '*' + if ((isstring(var) + or isarrayofstrings(var) # obsolete? 
+ or isstringarray(var))): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + ln = (80 - len(name) - 6) // 2 + return '/*%s %s %s*/' % (ln * '*', name, ln * '*') + + +def flatlist(lst): + if isinstance(lst, list): + return reduce(lambda x, y, f=flatlist: x + f(y), lst, []) + return [lst] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) + else: + str = str.replace('#%s#' % (k), d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in 
rules[k].keys():
+                if isinstance(k1, types.FunctionType) and k1(var):
+                    if isinstance(rules[k][k1], list):
+                        for i in rules[k][k1]:
+                            if isinstance(i, dict):
+                                res = applyrules({'supertext': i}, d, var)
+                                if 'supertext' in res:
+                                    i = res['supertext']
+                                else:
+                                    i = ''
+                            ret[k].append(replace(i, d))
+                    else:
+                        i = rules[k][k1]
+                        if isinstance(i, dict):
+                            res = applyrules({'supertext': i}, d)
+                            if 'supertext' in res:
+                                i = res['supertext']
+                            else:
+                                i = ''
+                        ret[k].append(replace(i, d))
+        else:
+            errmess('applyrules: ignoring rule %s.\n' % repr(rules[k]))
+        if isinstance(ret[k], list):
+            if len(ret[k]) == 1:
+                ret[k] = ret[k][0]
+            if ret[k] == []:
+                del ret[k]
+    return ret
+
+_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
+                                     re.I).match
+_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
+                                          r'__user__[\w_]*)', re.I).match
+
+def get_f2py_modulename(source):
+    name = None
+    with open(source) as f:
+        for line in f:
+            m = _f2py_module_name_match(line)
+            if m:
+                if _f2py_user_module_name_match(line):  # skip *__user__* names
+                    continue
+                name = m.group('name')
+                break
+    return name
+
+def getuseblocks(pymod):
+    all_uses = []
+    for inner in pymod['body']:
+        for modblock in inner['body']:
+            if modblock.get('use'):
+                all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x])
+    return all_uses
+
+def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False):
+    """
+    Update the Fortran-to-C type mapping dictionary with new mappings and
+    return a list of successfully mapped C types.
+
+    This function integrates a new mapping dictionary into an existing
+    Fortran-to-C type mapping dictionary. It ensures that all keys are in
+    lowercase and validates new entries against a given C-to-Python mapping
+    dictionary. Redefinitions and invalid entries are reported with a warning.
+
+    Parameters
+    ----------
+    f2cmap_all : dict
+        The existing Fortran-to-C type mapping dictionary that will be updated.
+        It should be a dictionary of dictionaries where the main keys represent
+        Fortran types and the nested dictionaries map Fortran type specifiers
+        to corresponding C types.
+
+    new_map : dict
+        A dictionary containing new type mappings to be added to `f2cmap_all`.
+        The structure should be similar to `f2cmap_all`, with keys representing
+        Fortran types and values being dictionaries of type specifiers and their
+        C type equivalents.
+
+    c2py_map : dict
+        A dictionary used for validating the C types in `new_map`. It maps C
+        types to corresponding Python types and is used to ensure that the C
+        types specified in `new_map` are valid.
+
+    verbose : bool
+        A flag used to provide information about the types mapped.
+
+    Returns
+    -------
+    tuple of (dict, list)
+        The updated Fortran-to-C type mapping dictionary and a list of
+        successfully mapped C types.
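+
+    Examples
+    --------
+    An illustrative sketch (hypothetical mappings, not from the original
+    sources)::
+
+        fmap = {'real': {'': 'float'}}
+        fmap, mapped = process_f2cmap_dict(
+            fmap, {'REAL': {'8': 'double'}}, {'double': 'float'})
+        # fmap   -> {'real': {'': 'float', '8': 'double'}}
+        # mapped -> ['double']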
+ """ + f2cmap_mapped = [] + + new_map_lower = {} + for k, d1 in new_map.items(): + d1_lower = {k1.lower(): v1 for k1, v1 in d1.items()} + new_map_lower[k.lower()] = d1_lower + + for k, d1 in new_map_lower.items(): + if k not in f2cmap_all: + f2cmap_all[k] = {} + + for k1, v1 in d1.items(): + if v1 in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" + % (k, k1, f2cmap_all[k][k1], v1) + ) + f2cmap_all[k][k1] = v1 + if verbose: + outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, v1)) + f2cmap_mapped.append(v1) + else: + if verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) + + return f2cmap_all, f2cmap_mapped diff --git a/phivenv/Lib/site-packages/numpy/f2py/capi_maps.py b/phivenv/Lib/site-packages/numpy/f2py/capi_maps.py new file mode 100644 index 0000000000000000000000000000000000000000..336ea8163956ddf516846f72effd248ebd1280a7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/capi_maps.py @@ -0,0 +1,819 @@ +""" +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ +f2py_version = __version__.version + +import copy +import re +import os +from .crackfortran import markoutercomma +from . import cb_rules +from ._isocbind import iso_c_binding_map, isoc_c2pycode_map, iso_c2py_map + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map', 'process_f2cmap_dict' +] + + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. 
+c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # forced casting + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + 'character': 'bytes', + } + +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_STRING', + 'character': 'NPY_STRING'} + +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S', + 'character': 'c'} + +# https://docs.python.org/3/c-api/arg.html#building-values +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'y', + 'character': 'c'} + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + } + +# Add ISO_C handling +c2pycode_map.update(isoc_c2pycode_map) +c2py_map.update(iso_c2py_map) +f2cmap_all, _ = process_f2cmap_dict(f2cmap_all, iso_c_binding_map, c2py_map) +# End ISO_C handling +f2cmap_default = copy.deepcopy(f2cmap_all) + +f2cmap_mapped = [] + +def load_f2cmap_file(f2cmap_file): + global f2cmap_all, f2cmap_mapped + + f2cmap_all = copy.deepcopy(f2cmap_default) + + if f2cmap_file is None: + # Default value + f2cmap_file = '.f2py_f2cmap' + if not os.path.isfile(f2cmap_file): + return + + # User defined additions to f2cmap_all. + # f2cmap_file must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERS in type specifications. 
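+    # A hypothetical `.f2py_f2cmap` file (illustrative sketch) could read:
+    #     dict(real=dict(sp='float', dp='double'))
+    # mapping Fortran `real(sp)` to C `float` and `real(dp)` to C `double`.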
+ try: + outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) + with open(f2cmap_file) as f: + d = eval(f.read().lower(), {}, {}) + f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) + outmess('Successfully applied user defined f2cmap changes\n') + except Exception as msg: + errmess('Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) + + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '\\"%s\\"', + 'character': "'%c'", + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess('getctype: function %s has no return value?!\n' % a) + elif issubroutine(var): + return ctype + elif ischaracter_or_characterarray(var): + return 'character' + elif isstring_or_stringarray(var): + return 'string' + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + else: + if not isexternal(var): + errmess('getctype: No C-type found in "%s", assuming void.\n' % var) + return ctype + + +def f2cexpr(expr): + """Rewrite Fortran expression as f2py supported C expression. + + Due to the lack of a proper expression parser in f2py, this + function uses a heuristic approach that assumes that Fortran + arithmetic expressions are valid C arithmetic expressions when + mapping Fortran function calls to the corresponding C function/CPP + macros calls. 
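+
+    For example (an illustrative note, not in the original docstring), a
+    length specification such as `character(len=len(x))` has its `len(x)`
+    rewritten to `f2py_slen(x)`, which the generated wrapper resolves to
+    the actual string length of `x`.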
+ + """ + # TODO: support Fortran `len` function with optional kind parameter + expr = re.sub(r'\blen\b', 'f2py_slen', expr) + return expr + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess('getstrlength: function %s has no return value?!\n' % a) + if not isstring(var): + errmess( + 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = f2cexpr(a['len']) + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, verbose=0): + ret = {} + if isstring(var) and not isarray(var): + ret['size'] = getstrlength(var) + ret['rank'] = '0' + ret['dims'] = '' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess('getctype: function %s has no return value?!\n' % af) + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = ', optional\\n Default: %s' % showinit + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) + sigout = '%s : %s' % (out_a, c2py_map[ctype]) + elif isstring(var): + if isintent_inout(var): + sig = 
'%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = '%s : %s string(len=%s)%s' % ( + a, opt, getstrlength(var), init) + sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = ' => %s' % ua + else: + ua = '' + sig = '%s : call-back function%s' % (a, ua) + sigout = sig + else: + errmess( + 'getpydocsign: Could not resolve docsignature for "%s".\n' % a) + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, + getstrlength(var)) + elif isscalar(var): + sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], + c2pycode_map[ctype],) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + if isarray(var): + init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( + ret['init.r'], ret['init.i']) + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = "'%s'" % (init[1:-1]) + return init, showinit + + +def get_elsize(var): + if isstring(var) or isstringarray(var): + elsize = getstrlength(var) + # override with user-specified length when available: + elsize = var['charselector'].get('f2py_len', elsize) + return elsize + if ischaracter(var) or ischaracterarray(var): + return '1' + # for numerical types, PyArray_New* functions ignore specified + # elsize, so we just return 1 and let elsize be determined at + # runtime, see fortranobject.c + return '1' + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrformat + + intent + """ + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append('F2PY_%s' % s) + if intent_flags: + # TODO: Evaluate intent_flags here. 
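+        # e.g. an `intent(inout)` argument collects 'F2PY_INTENT_INOUT'
+        # here (illustrative note, not in the original source).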
+ ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append('slen(%s)=%s' % (a, ret['length'])) + if isarray(var): + ddim = ','.join( + map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) + rl.append('dims(%s)' % ddim) + if isexternal(var): + ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( + a, ret['cbname'], ','.join(rl)) + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle('end of %s' % name), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + 
ret['F_FUNC'] = 'F_FUNC' + if '_' in name: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif 'externals' in rout and rout['externals']: + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + 
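+ # Names containing an underscore use the _US macro variants so that, + # under the UNDERSCORE_G77 convention, an extra trailing underscore is + # appended to match g77-style name mangling (see F_FUNC_US in cfuncs.py).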
ret['callbackname'] = '%s(%s,%s)' \ + % (F_FUNC, + rout['name'].lower(), + rout['name'].upper(), + ) + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle('end of %s' % ret['name']) + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + else: + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolete + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/phivenv/Lib/site-packages/numpy/f2py/cb_rules.py b/phivenv/Lib/site-packages/numpy/f2py/cb_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..649fdbce748d7f0f49649ea1d5180045013d6320 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/cb_rules.py @@ -0,0 +1,644 @@ +""" +Build call-back mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . 
import __version__ +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, + iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, + isintent_hide, isintent_in, isintent_inout, isintent_nothide, + isintent_out, isoptional, isrequired, isscalar, isstring, + isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, + stripcomma, throw_error +) +from . import cfuncs + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +typedef struct { + PyObject *capi; + PyTupleObject *args_capi; + int nofargs; + jmp_buf jmpbuf; +} #name#_t; + +#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS) + +static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL; + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + #name#_t *prev = _active_#name#; + _active_#name# = ptr; + return prev; +} + +static #name#_t *get_active_#name#(void) { + return _active_#name#; +} + +#else + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr); +} + +static #name#_t *get_active_#name#(void) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key); +} + +#endif + +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; + PyTupleObject *capi_arglist = NULL; + PyObject *capi_return = NULL; + PyObject *capi_tmp = NULL; + PyObject *capi_arglist_list = NULL; + int capi_j,capi_i = 0; + int capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif + cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } + capi_arglist = cb->args_capi; + CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + if (cb->capi==NULL) { + capi_longjmp_ok = 0; + cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + } + if (cb->capi==NULL) { + PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); + goto capi_fail; + } + if (F2PyCapsule_Check(cb->capi)) { + #name#_typedef #name#_cptr; + #name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi); + #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); + #return# + } + if (capi_arglist==NULL) { + capi_longjmp_ok = 0; + capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); + if (capi_tmp) { + capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + if (capi_arglist==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); + goto capi_fail; + } + } else { + PyErr_Clear(); + capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); + } + } + if (capi_arglist == NULL) { + PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); + goto capi_fail; + } +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) + capi_arglist_list = 
PySequence_List((PyObject *)capi_arglist); + if (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif + CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); + Py_DECREF(capi_arglist_list); + capi_arglist_list = NULL; +#else + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); +#endif +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif + CFUNCSMESSPY(\"cb:capi_return=\",capi_return); + if (capi_return == NULL) { + fprintf(stderr,\"capi_return is NULL\\n\"); + goto capi_fail; + } + if (capi_return == Py_None) { + Py_DECREF(capi_return); + capi_return = Py_BuildValue(\"()\"); + } + else if (!PyTuple_Check(capi_return)) { + capi_return = Py_BuildValue(\"(N)\",capi_return); + } + capi_j = PyTuple_Size(capi_return); + capi_i = 0; +#frompyobj# + CFUNCSMESS(\"cb:#name#:successful\\n\"); + Py_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif + goto capi_return_pt; +capi_fail: + fprintf(stderr,\"Call-back #name# failed.\\n\"); + Py_XDECREF(capi_return); + Py_XDECREF(capi_arglist_list); + if (capi_longjmp_ok) { + longjmp(cb->jmpbuf,-1); + } +capi_return_pt: + ; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ + def #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': ' Required arguments:', + 'docstropt': ' Optional arguments:', + 'docstrout': ' Return objects:', + 'docstrcbs': ' Call-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': ' #ctype# return_value = 0;', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + '''\ + if (capi_j>capi_i) { + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + "#ctype#_from_pyobj failed in 
converting return_value of" + " call-back function #name# to C #ctype#\\n"); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }''', + {debugcapi: + ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': ' return return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'}, + """\ + if (capi_j>capi_i) { + GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX + #ctype# return_value = {0, 0}; +#endif +""", + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + """\ + if (capi_j>capi_i) { +#ifdef F2PY_CB_RETURNCOMPLEX + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#else + GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#endif + } else { + fprintf(stderr, + \"Warning: call-back function #name# did not provide\" + \" return value (index=%d, type=#ctype#)\\n\",capi_i); + }""", + {debugcapi: """\ +#ifdef F2PY_CB_RETURNCOMPLEX + fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else + fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX + return return_value; +#else + return; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': ' #pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'}, + 'docstrout': {isintent_out: ' #pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: 
['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, + # untested with multiple args + 'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) + goto capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: '#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [ + {debugcapi: + (' fprintf(stderr,"debug-capi:cb:#varname#=#showvalueformat#:' + '%d:\\n",#varname_i#,#varname_i#_cb_len);')}, + {isintent_in: """\ + if (cb->nofargs>capi_i) + if 
(CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) { + int #varname_i#_cb_dims[] = {#varname_i#_cb_len}; + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) + goto capi_fail; + }"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': ' #cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_FARRAY,NULL); +""", + }, + """ + if (tmp_arr==NULL) + goto capi_fail; + if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) + goto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + """ if (capi_j>capi_i) { + PyArrayObject *rv_cb_arr = NULL; + if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; + rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); + if (rv_cb_arr == NULL) { + fprintf(stderr,\"rv_cb_arr is NULL\\n\"); + goto capi_fail; + } + MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); + if (capi_tmp != (PyObject *)rv_cb_arr) { + Py_DECREF(rv_cb_arr); + } + }""", + {debugcapi: ' fprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess('warning: empty body for %s\n' % (m['name'])) + + +def buildcallback(rout, um): + from . 
import capi_maps + + outmess(' Constructing call-back function "cb_%s_in_%s"\n' % + (rout['name'], um)) + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + {'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': 
ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess(' %s\n' % (ar['docstrshort'])) + return +################## Build call-back function ############# diff --git a/phivenv/Lib/site-packages/numpy/f2py/cfuncs.py b/phivenv/Lib/site-packages/numpy/f2py/cfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..5eb8ffade516774402eae491b8fc0cd24d9a52fc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/cfuncs.py @@ -0,0 +1,1545 @@ +#!/usr/bin/env python3 +""" +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import sys +import copy + +from . import __version__ + +f2py_version = __version__.version + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). + """ + if sys.stderr is not None: + sys.stderr.write(s) + +##################### Definitions ################## + +outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], + 'userincludes': [], + 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], + 'commonhooks': []} +needs = {} +includes0 = {'includes0': '/*need_includes0*/'} +includes = {'includes': '/*need_includes*/'} +userincludes = {'userincludes': '/*need_userincludes*/'} +typedefs = {'typedefs': '/*need_typedefs*/'} +typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} +cppmacros = {'cppmacros': '/*need_cppmacros*/'} +cfuncs = {'cfuncs': '/*need_cfuncs*/'} +callbacks = {'callbacks': '/*need_callbacks*/'} +f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', + } +commonhooks = {'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', + } + +############ Includes ################### + +includes0['math.h'] = '#include <math.h>' +includes0['string.h'] = '#include <string.h>' +includes0['setjmp.h'] = '#include <setjmp.h>' + +includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API +#include "arrayobject.h"''' +includes['npy_math.h'] = '#include "numpy/npy_math.h"' + +includes['arrayobject.h'] = '#include "fortranobject.h"' +includes['stdarg.h'] = '#include <stdarg.h>' + +############# Type definitions ############### + +typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' +typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' +typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' +typedefs['signed_char'] = 'typedef signed char signed_char;' +typedefs['long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __int64 long_long; +#else +typedef long long long_long; +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['unsigned_long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __uint64 long_long; +#else +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['long_double'] = """ +#ifndef _LONG_DOUBLE +typedef long double long_double; +#endif +""" +typedefs[ + 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' +typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' +typedefs['complex_double'] = 
'typedef struct {double r,i;} complex_double;' +typedefs['string'] = """typedef char * string;""" +typedefs['character'] = """typedef char character;""" + + +############### CPP macros #################### +cppmacros['CFUNCSMESS'] = """ +#ifdef DEBUGCFUNCS +#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); +#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. 
## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +cppmacros['len..'] = """ +/* See fortranobject.h for definitions. The macros here are provided for BC. */ +#define rank f2py_rank +#define shape f2py_shape +#define fshape f2py_shape +#define len f2py_len +#define flen f2py_flen +#define slen f2py_slen +#define size f2py_size +""" +cppmacros['pyobj_from_char1'] = r""" +#define pyobj_from_char1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_short1'] = r""" +#define pyobj_from_short1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = r""" +#define pyobj_from_int1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_long1'] = r""" +#define pyobj_from_long1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
+#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros['pyobj_from_long_double1'] = """ +#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_double1'] = """ +#define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_float1'] = """ +#define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros['pyobj_from_complex_long_double1'] = """ +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros['pyobj_from_complex_double1'] = """ +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros['pyobj_from_complex_float1'] = """ +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_string1'] = ['string'] +cppmacros['pyobj_from_string1'] = """ +#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" +needs['pyobj_from_string1size'] = ['string'] +cppmacros['pyobj_from_string1size'] = """ +#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))""" +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: 
PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']=""" +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ +# cppmacros['CNUMFROMARROBJ']=""" +# define CNUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ + + +needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE'] = """ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ + PyObject *rv_cb_str = 
PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyBytes_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } +""" +cppmacros['GETSCALARFROMPYTUPLE'] = """ +#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } +""" + +cppmacros['FAILNULL'] = """\ +#define FAILNULL(p) do { \\ + if ((p) == NULL) { \\ + PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ + goto capi_fail; \\ + } \\ +} while (0) +""" +needs['MEMCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['MEMCOPY'] = """ +#define MEMCOPY(to,from,n)\\ + do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) +""" +cppmacros['STRINGMALLOC'] = """ +#define STRINGMALLOC(str,len)\\ + if ((str = (string)malloc(len+1)) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } +""" +cppmacros['STRINGFREE'] = """ +#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) +""" +needs['STRINGPADN'] = ['string.h'] +cppmacros['STRINGPADN'] = """ +/* +STRINGPADN replaces null values with padding values from the right. + +`to` must have size of at least N bytes. + +If `to[N-1]` is a null, then replace it and all the preceding nulls +with the given padding. + +STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation. +*/ +#define STRINGPADN(to, N, NULLVALUE, PADDING) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\ + _to[_m] = PADDING; \\ + } \\ + } while (0) +""" +needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPYN'] = """ +/* +STRINGCOPYN copies N bytes. + +`to` and `from` buffers must have sizes of at least N bytes. 
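+ +Because strncpy is used, `to` is null-padded when `from` is shorter than +N bytes, and no terminating null is appended otherwise; buffers obtained +via STRINGMALLOC above are always null-terminated at index len.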
+*/ +#define STRINGCOPYN(to,from,N) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, _m); \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']=""" +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """ +#ifdef OLDPYNUM +#error You need to install NumPy version 0.13 or higher. 
See https://scipy.org/install.html +#endif +""" +cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ +#ifndef F2PY_THREAD_LOCAL_DECL +#if defined(_MSC_VER) +#define F2PY_THREAD_LOCAL_DECL __declspec(thread) +#elif defined(NPY_OS_MINGW) +#define F2PY_THREAD_LOCAL_DECL __thread +#elif defined(__STDC_VERSION__) \\ + && (__STDC_VERSION__ >= 201112L) \\ + && !defined(__STDC_NO_THREADS__) \\ + && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\ + && !defined(NPY_OS_OPENBSD) && !defined(NPY_OS_HAIKU) +/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12, + see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html, + so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence + of `threads.h` when using an older release of glibc 2.12 + See gh-19437 for details on OpenBSD */ +#include <threads.h> +#define F2PY_THREAD_LOCAL_DECL thread_local +#elif defined(__GNUC__) \\ + && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) +#define F2PY_THREAD_LOCAL_DECL __thread +#endif +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """ +static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; +static int initforcomb(npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + forcombcache.nd = nd; + forcombcache.d = dims; + forcombcache.tr = tr; + if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + for (k=1;k<nd;k++) { + forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0; + } + forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + return 1; +} +static int *nextforcomb(void) { + int j,*i,*i_tr,k; + int nd=forcombcache.nd; + if ((i=forcombcache.i) == NULL) return NULL; + if ((i_tr=forcombcache.i_tr) == NULL) return NULL; + if (forcombcache.d == NULL) return NULL; + i[0]++; + if (i[0]==forcombcache.d[0]) { + j=1; + while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++; + if (j==nd) { + free(i); + free(i_tr); + return NULL; + } + for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0; + i[j]++; + i_tr[nd-j-1]++; + } else + i_tr[nd-1]++; + if (forcombcache.tr) return i_tr; + return i; +}""" +needs['try_pyarr_from_string'] = ['STRINGCOPYN'] +cfuncs['try_pyarr_from_string'] = """ +/* + try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`. + + If obj is an `ndarray` that is not well behaved, copy str[:len(obj)] + to the data of obj->base. + */ +static int try_pyarr_from_string(PyObject *obj, + const string str, const int len) { +#ifdef DEBUGCFUNCS +fprintf(stderr, \"try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n\", + (char*)str,len, obj); +#endif + if (!obj) return -2; /* Object missing */ + if (obj == Py_None) return -1; /* None */ + if (!PyArray_Check(obj)) goto capi_fail; /* not an ndarray */ + if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + assert(ISCONTIGUOUS(arr)); + string buf = PyArray_DATA(arr); + npy_intp n = len; + if (n == -1) { + /* Assuming null-terminated str. */ + n = strlen(str); + } + if (n > PyArray_NBYTES(arr)) { + n = PyArray_NBYTES(arr); + } + STRINGCOPYN(buf, str, n); + return 1; + } +capi_fail: + PRINTPYOBJERR(obj); + PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\"); + return 0; +} +""" +needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] +cfuncs['string_from_pyobj'] = """ +/* + Create a new string buffer `str` of at most length `len` from a + Python string-like object `obj`. + + The string buffer has given size (len) or the size of inistr when len==-1. + + The string buffer is padded with blanks: in Fortran, trailing blanks + are insignificant contrary to C nulls. 
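+ + The buffer is padded with nulls here; the generated wrapper replaces + them with blanks when the corresponding argument is not intent(c).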
+ */ +static int +string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj, + const char *errmess) +{ + PyObject *tmp = NULL; + string buf = NULL; + npy_intp n = -1; +#ifdef DEBUGCFUNCS +fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\", + (char*)str, *len, (char *)inistr, obj); +#endif + if (obj == Py_None) { + n = strlen(inistr); + buf = inistr; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (!ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_ValueError, + \"array object is non-contiguous.\"); + goto capi_fail; + } + n = PyArray_NBYTES(arr); + buf = PyArray_DATA(arr); + n = strnlen(buf, n); + } + else { + if (PyBytes_Check(obj)) { + tmp = obj; + Py_INCREF(tmp); + } + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + } + } + if (tmp == NULL) goto capi_fail; + n = PyBytes_GET_SIZE(tmp); + buf = PyBytes_AS_STRING(tmp); + } + if (*len == -1) { + /* TODO: change the type of `len` so that we can remove this */ + if (n > NPY_MAX_INT) { + PyErr_SetString(PyExc_OverflowError, + "object too large for a 32-bit int"); + goto capi_fail; + } + *len = n; + } + else if (*len < n) { + /* discard the last (len-n) bytes of input buf */ + n = *len; + } + if (n < 0 || *len < 0 || buf == NULL) { + goto capi_fail; + } + STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1) + if (n < *len) { + /* + Pad fixed-width string with nulls. The caller will replace + nulls with blanks when the corresponding argument is not + intent(c). + */ + memset(*str + n, '\\0', *len - n); + } + STRINGCOPYN(*str, buf, n); + Py_XDECREF(tmp); + return 1; +capi_fail: + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + +cfuncs['character_from_pyobj'] = """ +static int +character_from_pyobj(character* v, PyObject *obj, const char *errmess) { + if (PyBytes_Check(obj)) { + /* empty bytes has trailing null, so dereferencing is always safe */ + *v = PyBytes_AS_STRING(obj)[0]; + return 1; + } else if (PyUnicode_Check(obj)) { + PyObject* tmp = PyUnicode_AsASCIIString(obj); + if (tmp != NULL) { + *v = PyBytes_AS_STRING(tmp)[0]; + Py_DECREF(tmp); + return 1; + } + } else if (PyArray_Check(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *v = PyArray_BYTES(arr)[0]; + return 1; + } else if (F2PY_IS_UNICODE_ARRAY(arr)) { + // TODO: update when numpy will support 1-byte and + // 2-byte unicode dtypes + PyObject* tmp = PyUnicode_FromKindAndData( + PyUnicode_4BYTE_KIND, + PyArray_BYTES(arr), + (PyArray_NBYTES(arr)>0?1:0)); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + } else if (PySequence_Check(obj)) { + PyObject* tmp = PySequence_GetItem(obj,0); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + /* TODO: This error (and most other) error handling needs cleaning. 
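+ Until then, the fallback below appends a description of the offending + object (via f2py_describe) to errmess and raises TypeError when no + exception is already set.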
*/ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + strcpy(mess, errmess); + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_TypeError; + Py_INCREF(err); + } + else { + Py_INCREF(err); + PyErr_Clear(); + } + sprintf(mess + strlen(mess), + " -- expected str|bytes|sequence-of-str-or-bytes, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + Py_DECREF(err); + } + return 0; +} +""" + +# TODO: These should be dynamically generated, too many mapped to int things, +# see note in _isocbind.py +needs['char_from_pyobj'] = ['int_from_pyobj'] +cfuncs['char_from_pyobj'] = """ +static int +char_from_pyobj(char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (char)i; + return 1; + } + return 0; +} +""" + + +needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] +cfuncs['signed_char_from_pyobj'] = """ +static int +signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; +} +""" + + +needs['short_from_pyobj'] = ['int_from_pyobj'] +cfuncs['short_from_pyobj'] = """ +static int +short_from_pyobj(short* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (short)i; + return 1; + } + return 0; +} +""" + + +cfuncs['int_from_pyobj'] = """ +static int +int_from_pyobj(int* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = Npy__PyLong_AsInt(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = Npy__PyLong_AsInt(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (int_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +cfuncs['long_from_pyobj'] = """ +static int +long_from_pyobj(long* v, PyObject *obj, const char *errmess) { + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +needs['long_long_from_pyobj'] = ['long_long'] +cfuncs['long_long_from_pyobj'] = """ +static int +long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = 
PyLong_AsLongLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj'] = """ +static int +long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess) +{ + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(obj)); + return 1; + } + } + if (double_from_pyobj(&d, obj, errmess)) { + *v = (long_double)d; + return 1; + } + return 0; +} +""" + + +cfuncs['double_from_pyobj'] = """ +static int +double_from_pyobj(double* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { + *v = PyFloat_AsDouble(obj); + return !(*v == -1.0 && PyErr_Occurred()); + } + + tmp = PyNumber_Float(obj); + if (tmp) { + *v = PyFloat_AsDouble(tmp); + Py_DECREF(tmp); + return !(*v == -1.0 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['float_from_pyobj'] = ['double_from_pyobj'] +cfuncs['float_from_pyobj'] = """ +static int +float_from_pyobj(float* v, PyObject *obj, const char *errmess) +{ + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; +} +""" + + +needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', + 'complex_double_from_pyobj', 'npy_math.h'] +cfuncs['complex_long_double_from_pyobj'] = """ +static int +complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) +{ + complex_double cd = {0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); + return 1; + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" + + +needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h'] +cfuncs['complex_double_from_pyobj'] = """ +static int +complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { + Py_complex c; + if (PyComplex_Check(obj)) { + c = PyComplex_AsCComplex(obj); + (*v).r = c.real; + (*v).i = c.imag; + return 1; + } + if 
(PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_crealf(new); + (*v).i = (double)npy_cimagf(new); + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_creall(new); + (*v).i = (double)npy_cimagl(new); + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyArrayObject *arr; + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr == NULL) { + return 0; + } + (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + Py_DECREF(arr); + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i = 0.0; + if (PyFloat_Check(obj)) { + (*v).r = PyFloat_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj'] = """ +static int +complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) +{ + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" + + +cfuncs['try_pyarr_from_character'] = """ +static int try_pyarr_from_character(PyObject* obj, character* v) { + PyArrayObject *arr = (PyArrayObject*)obj; + if (!obj) return -2; + if (PyArray_Check(obj)) { + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *(character *)(PyArray_DATA(arr)) = *v; + return 1; + } + } + { + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_ValueError; + strcpy(mess, "try_pyarr_from_character failed" + " -- expected bytes array-scalar|array, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + } + } + return 0; +} +""" + +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' +needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int 
try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +# create the list of arguments to be used when calling back to python +cfuncs['create_cb_arglist'] = """ +static int +create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs, + const int nofoptargs, int *nofargs, PyTupleObject **args, + const char *errmess) +{ + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + Py_ssize_t tot, opt, ext, siz, i, di = 0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) { + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); + tot = maxnofargs; + if (PyCFunction_Check(fun)) { + /* In case the function has a co_argcount (like on PyPy) */ + di = 0; + } + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\"); + goto capi_fail; + } + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + } + + if (tmp_fun == NULL) { + fprintf(stderr, + \"Call-back argument must be 
function|instance|instance.__call__|f2py-function \" + \"but got %s.\\n\", + ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name)); + goto capi_fail; + } + + if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { + PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); + Py_DECREF(tmp); + if (tmp_argcount == NULL) { + goto capi_fail; + } + tot = PyLong_AsSsize_t(tmp_argcount) - di; + Py_DECREF(tmp_argcount); + } + } + /* Get the number of optional arguments */ + if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) + opt = PyTuple_Size(tmp); + Py_XDECREF(tmp); + } + /* Get the number of extra arguments */ + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + /* Calculate the size of call-backs argument list */ + siz = MIN(maxnofargs+ext,tot); + *nofargs = MAX(0,siz-ext); + +#ifdef DEBUGCFUNCS + fprintf(stderr, + \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\" + \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\", + maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs); +#endif + + if (siz < tot-opt) { + fprintf(stderr, + \"create_cb_arglist: Failed to build argument list \" + \"(siz) with enough arguments (tot-opt) required by \" + \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\", + siz, tot, opt); + goto capi_fail; + } + + /* Initialize argument list */ + *args = (PyTupleObject *)PyTuple_New(siz); + for (i=0;i<*nofargs;i++) { + Py_INCREF(Py_None); + PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None); + } + if (xa != NULL) + for (i=(*nofargs);i 0: + if outneeds[n][0] not in needs: + out.append(outneeds[n][0]) + del outneeds[n][0] + else: + flag = 0 + for k in outneeds[n][1:]: + if k in needs[outneeds[n][0]]: + flag = 1 + break + if flag: + outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] + else: + out.append(outneeds[n][0]) + del outneeds[n][0] + if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ + and outneeds[n] != []: + print(n, saveout) + errmess( + 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') + out = out + saveout + break + saveout = copy.copy(outneeds[n]) + if out == []: + out = [n] + res[n] = out + return res diff --git a/phivenv/Lib/site-packages/numpy/f2py/common_rules.py b/phivenv/Lib/site-packages/numpy/f2py/common_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..7a86d105730f141241f9155ffe485eec4f545a7d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/common_rules.py @@ -0,0 +1,146 @@ +""" +Build common block mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ +f2py_version = __version__.version + +from .auxfuncs import ( + hasbody, hascommon, hasnote, isintent_hide, outmess, getuseblocks +) +from . import capi_maps +from . 
import func2subr +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = '%s\n %s' % (s[0], line) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd('subroutine f2pyinit%s(setupfunc)' % name) + for usename in getuseblocks(m): + fadd(f'use {usename}') + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd('common %s' % (','.join(vnames))) + else: + fadd('common /%s/ %s' % (name, ','.join(vnames))) + fadd('call setupfunc(%s)' % (','.join(inames))) + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + elsize = capi_maps.get_elsize(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append('(%s)' % (dm['dims'])) + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s, %s},' + % (n, dm['rank'], dms, at, elsize)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void f2py_init_%s(void) {' % name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) + iadd('\tif (tmp == NULL) return NULL;') + iadd('\tif (F2PyDict_SetItemString(d, \"%s\", tmp) == -1) return NULL;' + % name) + iadd('\tPy_DECREF(tmp);') + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + dadd('\\end{description}') + ret['docs'].append( + '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda 
v, d: v + d, inames, idims)))) + ret['commonhooks'] = chooks + ret['initcommonhooks'] = ihooks + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fwrap[0] diff --git a/phivenv/Lib/site-packages/numpy/f2py/crackfortran.py b/phivenv/Lib/site-packages/numpy/f2py/crackfortran.py new file mode 100644 index 0000000000000000000000000000000000000000..3da426d05b40dcdaec745c65510c42c0bb40f66c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/crackfortran.py @@ -0,0 +1,3755 @@ +#!/usr/bin/env python3 +""" +crackfortran --- read fortran (77,90) code and extract declaration information. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + + +Usage of crackfortran: +====================== +Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <filename> + -m <module name for f77 routines>,--ignore-contains +Functions: crackfortran, crack2fortran +The following Fortran statements/constructions are supported +(or will be if needed): + block data,byte,call,character,common,complex,contains,data, + dimension,double complex,double precision,end,external,function, + implicit,integer,intent,interface,intrinsic, + logical,module,optional,parameter,private,public, + program,real,(sequence?),subroutine,type,use,virtual, + include,pythonmodule +Note: 'virtual' is mapped to 'dimension'. +Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug). +Note: code after 'contains' will be ignored until its scope ends. +Note: 'common' statement is extended: dimensions are moved to variable definitions +Note: f2py directive: <commentchar>f2py<line> is read as <line> +Note: pythonmodule is introduced to represent Python module + +Usage: + `postlist=crackfortran(files)` + `postlist` contains declaration information read from the list of files `files`.
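+  A minimal usage sketch (the file name here is hypothetical):
+      postlist = crackfortran(['foo.f90'])
+      print(postlist[0]['block'], postlist[0]['name'])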
+ `crack2fortran(postlist)` returns Fortran code to be saved to a pyf-file + + `postlist` has the following structure: + *** it is a list of dictionaries containing `blocks': + B = {'block','body','vars','parent_block'[,'name','prefix','args','result', + 'implicit','externals','interfaced','common','sortvars', + 'commonvars','note']} + B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | + 'program' | 'block data' | 'type' | 'pythonmodule' | + 'abstract interface' + B['body'] --- list containing `subblocks' with the same structure as `blocks' + B['parent_block'] --- dictionary of a parent block: + C['body'][<index>]['parent_block'] is C + B['vars'] --- dictionary of variable definitions + B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) + B['name'] --- name of the block (not if B['block']=='interface') + B['prefix'] --- prefix string (only if B['block']=='function') + B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' + B['result'] --- name of the return value (only if B['block']=='function') + B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None + B['externals'] --- list of variables being external + B['interfaced'] --- list of variables being external and defined + B['common'] --- dictionary of common blocks (list of objects) + B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) + B['from'] --- string showing the 'parents' of the current block + B['use'] --- dictionary of modules used in current block: + {<modulename>:{['only':<0|1>],['map':{<local_name>:<use_name>,...}]}} + B['note'] --- list of LaTeX comments on the block + B['f2pyenhancements'] --- optional dictionary + {'threadsafe':'','fortranname':<name>, + 'callstatement':<C-expr>|<multi-line block>, + 'callprotoargument':<C-expr-list>, + 'usercode':<multi-line block>|<list of multi-line blocks>, + 'pymethoddef:<multi-line block>' + } + B['entry'] --- dictionary {entryname:argslist,..} + B['varnames'] --- list of variable names given in the order of reading the + Fortran code, useful for derived types. + B['saved_interface'] --- a string of scanned routine signature, defines explicit interface + *** Variable definition is a dictionary + D = B['vars'][<name>] = + {'typespec'[,'attrspec','kindselector','charselector','=','typename']} + D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | + 'double precision' | 'integer' | 'logical' | 'real' | 'type' + D['attrspec'] --- list of attributes (e.g.
'dimension(<arrayspec>)', + 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', + 'optional','required', etc) + K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = + 'complex' | 'integer' | 'logical' | 'real' ) + C = D['charselector'] = {['*','len','kind','f2py_len']} + (only if D['typespec']=='character') + D['='] --- initialization expression string + D['typename'] --- name of the type if D['typespec']=='type' + D['dimension'] --- list of dimension bounds + D['intent'] --- list of intent specifications + D['depend'] --- list of variable names on which current variable depends on + D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised + D['note'] --- list of LaTeX comments on the variable + *** Meaning of kind/char selectors (few examples): + D['typespec']*K['*'] + D['typespec'](kind=K['kind']) + character*C['*'] + character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len']) + (see also fortran type declaration statement formats below) + +Fortran 90 type declaration statement format (F77 is a subset of F90) +==================================================================== +(Main source: IBM XL Fortran 5.1 Language Reference Manual) +type declaration = <typespec> [[<attrspec>]::] <entitydecl> +<typespec> = byte | + character[<charselector>] | + complex[<kindselector>] | + double complex | + double precision | + integer[<kindselector>] | + logical[<kindselector>] | + real[<kindselector>] | + type(<typename>) +<charselector> = * <charlen> | + ([len=]<len>[,[kind=]<kind>]) | + (kind=<kind>[,len=<len>]) +<kindselector> = * <intlen> | + ([kind=]<kind>) +<attrspec> = comma separated list of attributes. + Only the following attributes are used in + building up the interface: + external + (parameter --- affects '=' key) + optional + intent + Other attributes are ignored. +<intentspec> = in | out | inout +<arrayspec> = comma separated list of dimension bounds. +<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>] + [/<init_expr>/ | =<init_expr>] [,<entitydecl>] + +In addition, the following attributes are used: check,depend,note + +TODO: + * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' + -> 'real x(2)') + The above may be solved by creating an appropriate preprocessor program, for example. + +""" +import sys +import string +import fileinput +import re +import os +import copy +import platform +import codecs +from pathlib import Path +try: + import charset_normalizer +except ImportError: + charset_normalizer = None + +from . import __version__ + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from . import symbolic + +f2py_version = __version__.version + +# Global flags: +strictf77 = 1 # Ignore `!' comments unless line[0]=='!' +sourcecodeform = 'fix' # 'fix','free' +quiet = 0 # Be verbose if 0 (Obsolete: not used any more) +verbose = 1 # Be quiet if 0, extra verbose if > 1.
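+# A rough sketch of how the command line keys documented in the module
+# docstring are assumed to map onto these flags (the option handling itself
+# sits at the bottom of this file, outside this excerpt):
+#   -f77   ->  strictf77 = 1; sourcecodeform = 'fix'
+#   -f90   ->  strictf77 = 0; sourcecodeform = 'free'
+#   -fix   ->  strictf77 = 0; sourcecodeform = 'fix'
+#   -quiet ->  verbose = 0      -verbose ->  verbose = 2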
+tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess('rmbadname1: Replacing "%s" with "%s".\n' % + (name, badnames[name])) + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' + % (name, invbadnames[name])) + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + +# Extensions +COMMON_FREE_EXTENSIONS = ['.f90', '.f95', '.f03', '.f08'] +COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +def openhook(filename, mode): + """Ensures that filename is opened with correct encoding parameter. 
+ + This function uses the charset_normalizer package, when available, for + determining the encoding of the file to be opened. When charset_normalizer + is not available, the function detects only UTF encodings; otherwise, ASCII + encoding is used as a fallback. + """ + # Reads in the entire file. Robust detection of encoding. + # Correctly handles comments or late stage unicode characters + # gh-22871 + if charset_normalizer is not None: + encoding = charset_normalizer.from_path(filename).best().encoding + else: + # hint: install charset_normalizer for correct encoding handling + # No need to read the whole file for trying with startswith + nbytes = min(32, os.path.getsize(filename)) + with open(filename, 'rb') as fhandle: + raw = fhandle.read(nbytes) + if raw.startswith(codecs.BOM_UTF8): + encoding = 'UTF-8-SIG' + elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): + encoding = 'UTF-32' + elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)): + encoding = 'UTF-16' + else: + # Fallback, without charset_normalizer + encoding = 'ascii' + return open(filename, mode, encoding=encoding) + + +def is_free_format(fname): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = False + if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS: + result = True + with openhook(fname, 'r') as fhandle: + line = fhandle.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = True + while n > 0 and line: + if line[0] != '!' and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = True + break + line = fhandle.readline() + return result + + +# Read fortran (77,90) code +def readfortrancode(ffile, dowithline=show, istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include '<filename>'\" is met.
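+    A rough call sketch (the file name is hypothetical; ``show``, the default
+    ``dowithline`` handler, presumably comes in via the auxfuncs star import):
+        readfortrancode(['foo.f'])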
+ """ + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 + global beginpattern, quiet, verbose, dolowercase, include_paths + + if not istop: + saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase + if ffile == []: + return + localdolowercase = dolowercase + # cont: set to True when the content of the last line read + # indicates statement continuation + cont = False + finalline = '' + ll = '' + includeline = re.compile( + r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) + cont1 = re.compile(r'(?P.*)&\s*\Z') + cont2 = re.compile(r'(\s*&|)(?P.*)') + mline_mark = re.compile(r".*?'''") + if istop: + dowithline('', -1) + ll, l1 = '', '' + spacedigits = [' '] + [str(_m) for _m in range(10)] + filepositiontext = '' + fin = fileinput.FileInput(ffile, openhook=openhook) + while True: + try: + l = fin.readline() + except UnicodeDecodeError as msg: + raise Exception( + f'readfortrancode: reading {fin.filename()}#{fin.lineno()}' + f' failed with\n{msg}.\nIt is likely that installing charset_normalizer' + ' package will help f2py determine the input file encoding' + ' correctly.') + if not l: + break + if fin.isfirstline(): + filepositiontext = '' + currentfilename = fin.filename() + gotnextfile = 1 + l1 = l + strictf77 = 0 + sourcecodeform = 'fix' + ext = os.path.splitext(currentfilename)[1] + if Path(currentfilename).suffix.lower() in COMMON_FIXED_EXTENSIONS and \ + not (_has_f90_header(l) or _has_fix_header(l)): + strictf77 = 1 + elif is_free_format(currentfilename) and not _has_fix_header(l): + sourcecodeform = 'free' + if strictf77: + beginpattern = beginpattern77 + else: + beginpattern = beginpattern90 + outmess('\tReading file %s (format:%s%s)\n' + % (repr(currentfilename), sourcecodeform, + strictf77 and ',strict' or '')) + + l = l.expandtabs().replace('\xa0', ' ') + # Get rid of newline characters + while not l == '': + if l[-1] not in "\n\r\f": + break + l = l[:-1] + # Unconditionally remove comments + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + if l.strip() == '': # Skip empty line + if sourcecodeform == 'free': + # In free form, a statement continues in the next line + # that is not a comment line [3.3.2.4^1], lines with + # blanks are comment lines [3.3.2.3^1]. Hence, the + # line continuation flag must retain its state. + pass + else: + # In fixed form, statement continuation is determined + # by a non-blank character at the 6-th position. Empty + # line indicates a start of a new statement + # [3.3.3.3^1]. Hence, the line continuation flag must + # be reset. + cont = False + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + else: # Skip comment line + cont = False + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if not (l[0] in spacedigits): + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. 
+ if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + filepositiontext = '' + fin.close() + if istop: + dowithline('', 1) + else: + gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase = saveglobals + +# Crack line +beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \ + r'\s*(?P<this>(\b(%s)\b))' + \ + r'\s*(?P<after>%s)\s*\Z' +## +fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' +typespattern = re.compile( + beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' +typespattern4implicit = re.compile(beforethisafter % ( + '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) +# +functionpattern = re.compile(beforethisafter % ( + r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' +subroutinepattern = re.compile(beforethisafter % ( + r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' +# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' +# +groupbegins77 = r'program|block\s*data' +beginpattern77 = re.compile( + beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' +groupbegins90 = groupbegins77 + \ + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \ + r'type(?!\s*\()' +beginpattern90 = re.compile( + beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' +groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' + r'endinterface|endsubroutine|endfunction') +endpattern = re.compile( + beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end' +# block, the Fortran 2008 construct needs special handling in the rest of the file +endifs = r'end\s*(if|do|where|select|while|forall|associate|' + \ + r'critical|enum|team)' +endifpattern = re.compile( + beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif' +# +moduleprocedures = r'module\s*procedure' +moduleprocedurepattern = re.compile( + beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \ + 'moduleprocedure' +implicitpattern = re.compile( + beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' +dimensionpattern = re.compile(beforethisafter % ( + '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' +externalpattern = re.compile( + beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' +optionalpattern = re.compile( + beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' +requiredpattern = re.compile( + beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' +publicpattern = re.compile( + beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' +privatepattern = re.compile( + beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' +intrinsicpattern = re.compile( + beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic' +intentpattern = re.compile(beforethisafter % ( + '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' +parameterpattern = re.compile( + beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' +datapattern = re.compile( + beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' +callpattern = re.compile( + beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' +entrypattern = re.compile( + 
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' +callfunpattern = re.compile( + beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' +commonpattern = re.compile( + beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' +usepattern = re.compile( + beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use' +containspattern = re.compile( + beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' +formatpattern = re.compile( + beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' +# Non-fortran and f2py-specific statements +f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', + 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' +multilinepattern = re.compile( + r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline' +## + +def split_by_unquoted(line, characters): + """ + Splits the line into (line[:i], line[i:]), + where i is the index of first occurrence of one of the characters + not within quotes, or len(line) if no such index exists + """ + assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" + r = re.compile( + r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)" + r"(?P<after>{char}.*)\Z".format( + not_quoted="[^\"'{}]".format(re.escape(characters)), + char="[{}]".format(re.escape(characters)), + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")')) + m = r.match(line) + if m: + d = m.groupdict() + return (d["before"], d["after"]) + return (line, "") + +def _simplifyargs(argsline): + a = [] + for n in markoutercomma(argsline).split('@,@'): + for r in '(),': + n = n.replace(r, '_') + a.append(n) + return ','.join(a) + +crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bind_1 = re.compile(r'\s*(?P<bind>\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bindlang = re.compile(r'\s*bind\(\s*(?P<lang>[^,]+)\s*,\s*name\s*=\s*"(?P<lang_name>[^"]+)"\s*\)', re.I) + +def crackline(line, reset=0): + """ + reset=-1 --- initialize + reset=0 --- crack the line + reset=1 --- final check if mismatch of blocks occurred + + Cracked data is saved in grouplist[0]. + """ + global beginpattern, groupcounter, groupname, groupcache, grouplist + global filepositiontext, currentfilename, neededmodule, expectbegin + global skipblocksuntil, skipemptyends, previous_context, gotnextfile + + _, has_semicolon = split_by_unquoted(line, ";") + if has_semicolon and not (f2pyenhancementspattern[0].match(line) or + multilinepattern[0].match(line)): + # XXX: non-zero reset values need testing + assert reset == 0, repr(reset) + # split line on unquoted semicolons + line, semicolon_line = split_by_unquoted(line, ";") + while semicolon_line: + crackline(line, reset) + line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") + crackline(line, reset) + return + if reset < 0: + groupcounter = 0 + groupname = {groupcounter: ''} + groupcache = {groupcounter: {}} + grouplist = {groupcounter: []} + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = '' + groupcache[groupcounter]['name'] = '' + neededmodule = -1 + skipblocksuntil = -1 + return + if reset > 0: + fl = 0 + if f77modulename and neededmodule == groupcounter: + fl = 2 + while groupcounter > fl: + outmess('crackline: groupcounter=%s groupname=%s\n' % + (repr(groupcounter), repr(groupname))) + outmess( + 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if f77modulename and neededmodule == groupcounter: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end module + neededmodule = -1 + return + if line == '': + return + flag = 0 + for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, + requiredpattern, + parameterpattern, datapattern, publicpattern, privatepattern, + intrinsicpattern, + endifpattern, endpattern, + formatpattern, + beginpattern, functionpattern, subroutinepattern, + implicitpattern, typespattern, commonpattern, + callpattern, usepattern, containspattern, + entrypattern, + f2pyenhancementspattern, + multilinepattern, + moduleprocedurepattern + ]: + m = pat[0].match(line) + if m: + break + flag = flag + 1 + if not m: + re_1 = crackline_re_1 + if 0 <= skipblocksuntil <= groupcounter: + return + if 'externals' in groupcache[groupcounter]: + for name in groupcache[groupcounter]['externals']: + if name in invbadnames: + name = invbadnames[name] + if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: + continue + m1 = re.match( + r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) + if m1: + m2 = re_1.match(m1.group('before')) + a = _simplifyargs(m1.group('args')) + if m2: + line = 'callfun %s(%s) result (%s)' % ( + name, a, m2.group('result')) + else: + line = 'callfun %s(%s)' % (name, a) + m = callfunpattern[0].match(line) + if not m: + outmess( + 'crackline: could not resolve function call for line=%s.\n' % repr(line)) + return + analyzeline(m, 'callfun', line) + return + if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): + previous_context = None + outmess('crackline:%d: No pattern for line\n' % (groupcounter)) + return + elif pat[1] == 'end': + if 0 <= skipblocksuntil < groupcounter: + groupcounter = groupcounter - 1 + if skipblocksuntil <= groupcounter: + return + if groupcounter <= 0: + raise Exception('crackline: groupcounter(=%s) is nonpositive. ' + 'Check the blocks.' 
% (groupcounter)) + m1 = beginpattern[0].match((line)) + if (m1) and (not m1.group('this') == groupname[groupcounter]): + raise Exception('crackline: End group %s does not match with ' + 'previous Begin group %s\n\t%s' % + (repr(m1.group('this')), repr(groupname[groupcounter]), + filepositiontext) + ) + if skipblocksuntil == groupcounter: + skipblocksuntil = -1 + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if not skipemptyends: + expectbegin = 1 + elif pat[1] == 'begin': + if 0 <= skipblocksuntil <= groupcounter: + groupcounter = groupcounter + 1 + return + gotnextfile = 0 + analyzeline(m, pat[1], line) + expectbegin = 0 + elif pat[1] == 'endif': + pass + elif pat[1] == 'moduleprocedure': + analyzeline(m, pat[1], line) + elif pat[1] == 'contains': + if ignorecontains: + return + if 0 <= skipblocksuntil <= groupcounter: + return + skipblocksuntil = groupcounter + else: + if 0 <= skipblocksuntil <= groupcounter: + return + analyzeline(m, pat[1], line) + + +def markouterparen(line): + l = '' + f = 0 + for c in line: + if c == '(': + f = f + 1 + if f == 1: + l = l + '@(@' + continue + elif c == ')': + f = f - 1 + if f == 0: + l = l + '@)@' + continue + l = l + c + return l + + +def markoutercomma(line, comma=','): + l = '' + f = 0 + before, after = split_by_unquoted(line, comma + '()') + l += before + while after: + if (after[0] == comma) and (f == 0): + l += '@' + comma + '@' + else: + l += after[0] + if after[0] == '(': + f += 1 + elif after[0] == ')': + f -= 1 + before, after = split_by_unquoted(after[1:], comma + '()') + l += before + assert not f, repr((f, line, l)) + return l + +def unmarkouterparen(line): + r = line.replace('@(@', '(').replace('@)@', ')') + return r + + +def appenddecl(decl, decl2, force=1): + if not decl: + decl = {} + if not decl2: + return decl + if decl is decl2: + return decl + for k in list(decl2.keys()): + if k == 'typespec': + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'attrspec': + for l in decl2[k]: + decl = setattrspec(decl, l, force) + elif k == 'kindselector': + decl = setkindselector(decl, decl2[k], force) + elif k == 'charselector': + decl = setcharselector(decl, decl2[k], force) + elif k in ['=', 'typename']: + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'note': + pass + elif k in ['intent', 'check', 'dimension', 'optional', + 'required', 'depend']: + errmess('appenddecl: "%s" not implemented.\n' % k) + else: + raise Exception('appenddecl: Unknown variable definition key: ' + + str(k)) + return decl + +selectpattern = re.compile( + r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I) +typedefpattern = re.compile( + r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)' + r'(?:\((?P<params>[\w,]*)\))?\Z', re.I) +nameargspattern = re.compile( + r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I) +operatorpattern = re.compile( + r'\s*(?P<scheme>(operator|assignment))' + r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I) +callnameargspattern = re.compile( + r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I) +real16pattern = re.compile( + r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') +real8pattern = re.compile( + r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') + +_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) + + +def 
_is_intent_callback(vdecl): + for a in vdecl.get('attrspec', []): + if _intentcallbackpattern.match(a): + return 1 + return 0 + + +def _resolvetypedefpattern(line): + line = ''.join(line.split()) # removes whitespace + m1 = typedefpattern.match(line) + print(line, m1) + if m1: + attrs = m1.group('attributes') + attrs = [a.lower() for a in attrs.split(',')] if attrs else [] + return m1.group('name'), attrs, m1.group('params') + return None, [], None + +def parse_name_for_bind(line): + pattern = re.compile(r'bind\(\s*(?P<lang>[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P<name>[^"\']+)["\']\s*)?\)', re.I) + match = pattern.search(line) + bind_statement = None + if match: + bind_statement = match.group(0) + # Remove the 'bind' construct from the line. + line = line[:match.start()] + line[match.end():] + return line, bind_statement + +def _resolvenameargspattern(line): + line, bind_cname = parse_name_for_bind(line) + line = markouterparen(line) + m1 = nameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), m1.group('result'), bind_cname + m1 = operatorpattern.match(line) + if m1: + name = m1.group('scheme') + '(' + m1.group('name') + ')' + return name, [], None, None + m1 = callnameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), None, None + return None, [], None, None + + +def analyzeline(m, case, line): + """ + Reads each line in the input file in sequence and updates global vars. + + Effectively reads and collects information from the input file to the + global variable groupcache, a dictionary containing info about each part + of the fortran module. + + At the end of analyzeline, information is filtered into the correct dict + keys, but parameter values and dimensions are not yet interpreted. + """ + global groupcounter, groupname, groupcache, grouplist, filepositiontext + global currentfilename, f77modulename, neededinterface, neededmodule + global expectbegin, gotnextfile, previous_context + + block = m.group('this') + if case != 'multiline': + previous_context = None + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ + and not skipemptyends and groupcounter < 1: + newname = os.path.basename(currentfilename).split('.')[0] + outmess( + 'analyzeline: no group yet. 
Creating program group with name "%s".\n' % newname) + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + elif re.match(r'python\s*module', block, re.I): + block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' + if block == 'type': + name, attrs, _ = _resolvetypedefpattern(m.group('after')) + groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs) + args = [] + result = None + else: + name, args, result, bindcline = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data', 'abstract interface']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block not in ['interface', 'abstract interface']: + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + 
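+            # (note) a fresh groupcache/grouplist slot is allocated here for
+            # the block itself; its name, args and vars are filled in below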
grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block.replace(' ', '_') + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + else: + if f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + # name is fortran name + if bindcline: + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name : {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, _= _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + 
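+            # (illustration) "entry setup2(a, b)" is recorded here as
+            # groupcache[groupcounter]['entry']['setup2'] == ['a', 'b']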
groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if case in ['public', 'private'] and \ + (k == 'operator' or k == 'assignment'): + k += m1.group('after') + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + 'analyzeline: intent(callback) %s is ignored\n' % (k)) + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list\n' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'moduleprocedure': + groupcache[groupcounter]['implementedby'] = \ + [x.strip() for x in m.group('after').split(',')] + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + 'analyzeline: could not extract name,expr in 
parameter statement "%s" of "%s"\n' % (e, ll)) + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if '=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + if 'implicit' in groupcache[groupcounter]: + impl = groupcache[groupcounter]['implicit'] + else: + impl = {} + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r) + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r) + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = groupcache[groupcounter].get('vars', {}) + last_name = None + for l in ll: + l[0],
l[1] = l[0].strip(), l[1].strip() + if l[0].startswith(','): + l[0] = l[0][1:] + if l[0].startswith('('): + outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) + continue + for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])): + if v.startswith('('): + outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) + # XXX: subsequent init expressions may get wrong values. + # Ignoring since data statements are irrelevant for + # wrapping. + continue + if '!' in l[1]: + # Fixes gh-24746 pyf generation + # XXX: This essentially ignores the value for generating the pyf which is fine: + # integer dimension(3) :: mytab + # common /mycom/ mytab + # Since in any case it is initialized in the Fortran code + outmess('Comment line in declaration "%s" is not supported. Skipping.\n' % l[1]) + continue + vars.setdefault(v, {}) + vtype = vars[v].get('typespec') + vdim = getdimension(vars[v]) + matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',') + try: + new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + except IndexError: + # gh-24746 + # Runs only if above code fails. Fixes the line + # DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /4*0,0.0D0/ + # by expanding to ['0', '0', '0', '0', '0.0d0'] + if any("*" in m for m in matches): + expanded_list = [] + for match in matches: + if "*" in match: + try: + multiplier, value = match.split("*") + expanded_list.extend([value.strip()] * int(multiplier)) + except ValueError: # if int(multiplier) fails + expanded_list.append(match.strip()) + else: + expanded_list.append(match.strip()) + matches = expanded_list + new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + current_val = vars[v].get('=') + if current_val and (current_val != new_val): + outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (v, current_val, new_val)) + vars[v]['='] = new_val + last_name = v + groupcache[groupcounter]['vars'] = vars + if last_name: + previous_context = ('variable', last_name, groupcounter) + elif case == 'common': + line = m.group('after').strip() + if not line[0] == '/': + line = '//' + line + cl = [] + f = 0 + bn = '' + ol = '' + for c in line: + if c == '/': + f = f + 1 + continue + if f >= 3: + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + f = f - 2 + bn = '' + ol = '' + if f % 2: + bn = bn + c + else: + ol = ol + c + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + commonkey = {} + if 'common' in groupcache[groupcounter]: + commonkey = groupcache[groupcounter]['common'] + for c in cl: + if c[0] not in commonkey: + commonkey[c[0]] = [] + for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: + if i: + commonkey[c[0]].append(i) + groupcache[groupcounter]['common'] = commonkey + previous_context = ('common', bn, groupcounter) + elif case == 'use': + m1 = re.match( + r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I) + if m1: + mm = m1.groupdict() + if 'use' not in groupcache[groupcounter]: + groupcache[groupcounter]['use'] = {} + name = m1.group('name') + groupcache[groupcounter]['use'][name] = {} + isonly = 0 + if 'list' in mm and mm['list'] is not None: + if 'notonly' in mm and mm['notonly'] is None: + isonly = 1 + groupcache[groupcounter]['use'][name]['only'] = isonly + ll = [x.strip() for x in mm['list'].split(',')] + rl = {} + for l in ll: + if '=' in l: + m2 = re.match( +
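# e.g. "a => b" in "use m, only: a => b" renames module entity 'b' + # to local name 'a'; rl maps local -> use names below: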
r'\A\s*(?P<local>\b\w+\b)\s*=\s*>\s*(?P<use>\b\w+\b)\s*\Z', l, re.I) + if m2: + rl[m2.group('local').strip()] = m2.group( + 'use').strip() + else: + outmess( + 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) + else: + rl[l] = l + groupcache[groupcounter]['use'][name]['map'] = rl + else: + pass + else: + print(m.groupdict()) + outmess('analyzeline: Could not crack the use statement.\n') + elif case in ['f2pyenhancements']: + if 'f2pyenhancements' not in groupcache[groupcounter]: + groupcache[groupcounter]['f2pyenhancements'] = {} + d = groupcache[groupcounter]['f2pyenhancements'] + if m.group('this') == 'usercode' and 'usercode' in d: + if isinstance(d['usercode'], str): + d['usercode'] = [d['usercode']] + d['usercode'].append(m.group('after')) + else: + d[m.group('this')] = m.group('after') + elif case == 'multiline': + if previous_context is None: + if verbose: + outmess('analyzeline: No context for multiline block.\n') + return + gc = groupcounter + appendmultiline(groupcache[gc], + previous_context[:2], + m.group('this')) + else: + if verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') + + +def appendmultiline(group, context_name, ml): + if 'f2pymultilines' not in group: + group['f2pymultilines'] = {} + d = group['f2pymultilines'] + if context_name not in d: + d[context_name] = [] + d[context_name].append(ml) + return + + +def cracktypespec0(typespec, ll): + selector = None + attr = None + if re.match(r'double\s*complex', typespec, re.I): + typespec = 'double complex' + elif re.match(r'double\s*precision', typespec, re.I): + typespec = 'double precision' + else: + typespec = typespec.strip().lower() + m1 = selectpattern.match(markouterparen(ll)) + if not m1: + outmess( + 'cracktypespec0: no kind/char_selector pattern found for line.\n') + return + d = m1.groupdict() + for k in list(d.keys()): + d[k] = unmarkouterparen(d[k]) + if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: + selector = d['this'] + ll = d['after'] + i = ll.find('::') + if i >= 0: + attr = ll[:i].strip() + ll = ll[i + 2:] + return typespec, selector, attr, ll +##### +namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I) +kindselector = re.compile( + r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I) +charselector = re.compile( + r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I) +lenkindpattern = re.compile( + r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)' + r'|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)' + r'|(f2py_len\s*=\s*(?P<f2py_len>.*))|))\s*\Z', re.I) +lenarraypattern = re.compile( + r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I) + + +def removespaces(expr): + expr = expr.strip() + if len(expr) <= 1: + return expr + expr2 = expr[0] + for i in range(1, len(expr) - 1): + if (expr[i] == ' ' and + ((expr[i + 1] in "()[]{}=+-/* ") or + (expr[i - 1] in "()[]{}=+-/* "))): + continue + expr2 = expr2 + expr[i] + expr2 = expr2 + expr[-1] + return expr2 + + +def markinnerspaces(line): + """ + The function replaces all spaces in the input variable line which are + surrounded with quotation marks, with the triplet "@_@".
+ + For instance, for the input "a 'b c'" the function returns "a 'b@_@c'" + + Parameters + ---------- + line : str + + Returns + ------- + str + + """ + fragment = '' + inside = False + current_quote = None + escaped = '' + for c in line: + if escaped == '\\' and c in ['\\', '\'', '"']: + fragment += c + escaped = c + continue + if not inside and c in ['\'', '"']: + current_quote = c + if c == current_quote: + inside = not inside + elif c == ' ' and inside: + fragment += '@_@' + continue + fragment += c + escaped = c # reset to non-backslash + return fragment + + +def updatevars(typespec, selector, attrspec, entitydecl): + """ + Returns last_name, the variable name without special chars, parenthesis + or dimension specifiers. + + Alters groupcache to add the name, typespec, attrspec (and possibly value) + of current variable. + """ + global groupcache, groupcounter + + last_name = None + kindselect, charselect, typename = cracktypespec(typespec, selector) + # Clean up outer commas, whitespace and undesired chars from attrspec + if attrspec: + attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] + l = [] + c = re.compile(r'(?P<start>[a-zA-Z]+)') + for a in attrspec: + if not a: + continue + m = c.match(a) + if m: + s = m.group('start').lower() + a = s + a[len(s):] + l.append(a) + attrspec = l + el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] + el1 = [] + for e in el: + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: + el1.append(e1.replace('@_@', ' ')) + for e in el1: + m = namepattern.match(e) + if not m: + outmess( + 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) + continue + ename = rmbadname1(m.group('name')) + edecl = {} + if ename in groupcache[groupcounter]['vars']: + edecl = groupcache[groupcounter]['vars'][ename].copy() + not_has_typespec = 'typespec' not in edecl + if not_has_typespec: + edecl['typespec'] = typespec + elif typespec and (not typespec == edecl['typespec']): + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector'] = copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['kindselector'][k], kindselect[k])) + else: + edecl['kindselector'][k] = copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector'] = charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' + % (ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['charselector'][k], charselect[k])) + else: + edecl['charselector'][k] = copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename'] = typename + elif typename and (not edecl['typename'] == typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s".
Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + del d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + + if 'array' in d1: + dm = 'dimension(%s)' % d1['array'] + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' + % (ename, dm1, dm)) + break + + else: + outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( + ename + m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename] = edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + + +def cracktypespec(typespec, selector): + kindselect = None + charselect = None + typename = None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect = kindselector.match(selector) + if not kindselect: + outmess( + 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) + return + kindselect = kindselect.groupdict() + kindselect['*'] = kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: + del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec == 'character': + charselect = charselector.match(selector) + if not charselect: + outmess( + 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) + return + charselect = charselect.groupdict() + charselect['*'] = charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind = lenkindpattern.match( + markoutercomma(charselect['lenkind'])) + lenkind = lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk + '2']: + lenkind[lk] = lenkind[lk + '2'] + charselect[lk] = lenkind[lk] + del lenkind[lk + '2'] + if lenkind['f2py_len'] is not None: + # used to specify the length of assumed length strings + charselect['f2py_len'] = lenkind['f2py_len'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: + del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec == 'type': + typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I) + if typename: + typename = typename.group('name') + else: + outmess('cracktypespec: no typename found in %s\n' % + (repr(typespec + selector))) + else: + outmess('cracktypespec: no selector used for %s\n' % + (repr(selector))) + return kindselect, charselect, typename +###### + + +def setattrspec(decl, attr, force=0): + if not decl: + decl = {} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec'] = [attr] + return decl + if force: + decl['attrspec'].append(attr) + if attr in decl['attrspec']: + return decl + if attr == 'static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'public': + if 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'private': + if 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + + +def setkindselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k] = sel[k] + return decl + + +def setcharselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector'] = sel + return decl + + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k] = sel[k] + return decl + + +def getblockname(block, unknown='unknown'): + if 'name' in block: + return
block['name'] + return unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess('get_useparameters: mapping for %s not impl.\n' % (mapping)) + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s\n' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + if 'args' in block and block['args']: + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if 'externals' in block and block['externals']: + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname 
in userisdefined: + i = 1 + while '%s_%i' % (mname, i) in userisdefined: + i = i + 1 + mname = '%s_%i' % (mname, i) + interface = {'block': 'interface', 'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + else: + if e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + 'dimension(%s)' % (','.join(dims))) + else: + block['vars'][n]['attrspec'] = [ + 'dimension(%s)' % (','.join(dims))] + else: + if dims: + block['vars'][n] = { + 'attrspec': ['dimension(%s)' % (','.join(dims))]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + 'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k)) + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + + maybe_private = { + key: value + for key, value in block['vars'].items() + if 'attrspec' not in value or 'public' not in value['attrspec'] + } + + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + # Add private members to skipfuncs for gh-23879 + if b['name'] in maybe_private.keys(): + skipfuncs.append(b['name']) + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + +
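# keep a regenerated copy of the routine's interface text; later + # stages can consult it without re-cracking the block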
else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] in ['interface', 'abstract interface'] and \ + not b['body'] and not b.get('implementedby'): + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '') == 'pythonmodule': + usermodules.append(b) + else: + if b['block'] == 'module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + + +def buildimplicitrules(block): + setmesstext(block) + implicitrules = defaultimplicitrules + attrrules = {} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules = None + if verbose > 1: + outmess( + 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k] = block['implicit'][k] + else: + attrrules[k] = block['implicit'][k]['typespec'] + return implicitrules, attrrules + + +def myeval(e, g=None, l=None): + """ Like `eval` but returns only integers and floats """ + r = eval(e, g, l) + if type(r) in [int, float]: + return r + raise ValueError('r=%r' % (r)) + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) + + +def getlincoef(e, xset): # e = a*x+b ; x in xset + """ + Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in + xset. + + >>> getlincoef('2*x + 1', {'x'}) + (2, 1, 'x') + >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) + (5, 3, 'x') + >>> getlincoef('0', {'x'}) + (0, 0, None) + >>> getlincoef('0*x', {'x'}) + (0, 0, 'x') + >>> getlincoef('x*x', {'x'}) + (None, None, None) + + This can be tricked by sufficiently complex expressions + + >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) + (2.0, 3.0, 'x') + """ + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except Exception: + pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x) > len_e: + continue + if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): + # skip function calls having x as an argument, e.g. max(1, x) + continue + re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0, m1.group('after')) + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1, m1.group('after')) + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0.5, m1.group('after')) + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1.5, m1.group('after')) + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a * 0.5 + b == c and a * 1.5 + b == c2): + return a, b, x + except Exception: + pass + break + return None, None, None + + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + # The word_pattern may return values that are not + # only variables, they can be string content for instance + if word not in words and word in vars and word != name: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: +
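# name was never declared in vars, e.g. a symbol that only occurs + # inside another variable's dimension or init expression: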
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + words = [] + deps[name] = words + return words + + +def _calc_depend_dict(vars): + names = list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + + +def get_sorted_names(vars): + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + + +def _kind_func(string): + # XXX: return something sensible. + if string[0] in "'\"": + string = string[1:-1] + if real16pattern.match(string): + return 8 + elif real8pattern.match(string): + return 4 + return 'kind(' + string + ')' + + +def _selected_int_kind_func(r): + # XXX: This should be processor dependent + m = 10 ** r + if m <= 2 ** 8: + return 1 + if m <= 2 ** 16: + return 2 + if m <= 2 ** 32: + return 4 + if m <= 2 ** 63: + return 8 + if m <= 2 ** 128: + return 16 + return -1 + + +def _selected_real_kind_func(p, r=0, radix=0): + # XXX: This should be processor dependent + # This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above + if p < 7: + return 4 + if p < 16: + return 8 + machine = platform.machine().lower() + if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): + if p <= 33: + return 16 + else: + if p < 19: + return 10 + elif p <= 33: + return 16 + return -1 + + +def get_parameters(vars, global_params={}): + params = copy.copy(global_params) + g_params = copy.copy(global_params) + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), ]: + if name not in g_params: + g_params[name] = func + param_names = [] + for n in get_sorted_names(vars): + if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: + param_names.append(n) + kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_int_kind_re = re.compile( + r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_kind_re = re.compile( + r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + for n in param_names: + if '=' in vars[n]: + v = vars[n]['='] + if islogical(vars[n]): + v = v.lower() + for repl in [ + ('.false.', 'False'), + ('.true.', 'True'), + # TODO: test .eq., .neq., etc replacements. + ]: + v = v.replace(*repl) + + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) + + # We need to act according to the data. + # The easy case is if the data has a kind-specifier, + # then we may easily remove those specifiers. + # However, it may be that the user uses other specifiers...(!) + is_replaced = False + + if 'kindselector' in vars[n]: + # Remove kind specifier (including those defined + # by parameters) + if 'kind' in vars[n]['kindselector']: + orig_v_len = len(v) + v = v.replace('_' + vars[n]['kindselector']['kind'], '') + # Again, this will be true if even a single specifier + # has been replaced, see comment above.
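# e.g. v = '1.5_dp' with kindselector kind 'dp' has just been + # reduced to '1.5' by the replacement above: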
+ is_replaced = len(v) < orig_v_len + + if not is_replaced: + if not selected_kind_re.match(v): + v_ = v.split('_') + # In case there are additive parameters + if len(v_) > 1: + v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') + + # Currently this will not work for complex numbers. + # There is missing code for extracting a complex number, + # which may be defined in either of these: + # a) (Re, Im) + # b) cmplx(Re, Im) + # c) dcmplx(Re, Im) + # d) cmplx(Re, Im, ) + + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list( + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + + elif iscomplex(vars[n]): + outmess(f'get_parameters[TODO]: ' + f'implement evaluation of complex expression {v}\n') + + dimspec = ([s.lstrip('dimension').strip() + for s in vars[n]['attrspec'] + if s.startswith('dimension')] or [None])[0] + + # Handle _dp for gh-6624 + # Also fixes gh-20460 + if real16pattern.search(v): + v = 8 + elif real8pattern.search(v): + v = 4 + try: + params[n] = param_eval(v, g_params, params, dimspec=dimspec) + except Exception as msg: + params[n] = v + outmess(f'get_parameters: got "{msg}" on {n!r}\n') + + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl != n: + params[nl] = params[n] + else: + print(vars[n]) + outmess(f'get_parameters:parameter {n!r} does not have value?!\n') + return params + + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + + +_is_kind_number = re.compile(r'\d+_').match + + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + # TODO: use symbolic from PR #19805 + value = eval(value, {}, params) + value = (repr if isinstance(value, str) else str)(value) + except (NameError, SyntaxError, TypeError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r ' + '(available names: %s)\n' + % (msg, value, list(params.keys()))) + return value + + +def analyzevars(block): + """ + Sets correct dimension information for each variable/parameter + """ + + global f90modulevars + + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + vars = copy.copy(block['vars']) + if block['block'] == 'function' and block['name'] not in vars: + vars[block['name']] = {} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen = block['vars']['']['attrspec'] + for n in set(vars) | set(b['name'] for b in block['body']): + for k in ['public', 'private']: + if k in gen: + vars[n] = setattrspec(vars.get(n, {}), k) + svars = [] + args = block['args'] + for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: + svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + # At this point, params are read and interpreted, but + # the params used to define vars are not yet parsed + dep_matches = {} + name_match = re.compile(r'[A-Za-z][\w$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n] = setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in 
list(implicitrules[ln0].keys()): + if k == 'typespec' and implicitrules[ln0][k] == 'undefined': + continue + if k not in vars[n]: + vars[n][k] = implicitrules[ln0][k] + elif k == 'attrspec': + for l in implicitrules[ln0][k]: + vars[n] = setattrspec(vars[n], l) + elif n in block['args']: + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['kindselector']['kind'] = l + + dimension_exprs = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname( + [x.strip() for x in markoutercomma(dim).split('@,@')] + ): + # d is the expression inside the dimension declaration + # Evaluate `d` with respect to params + try: + # the dimension for this variable depends on a + # previously defined parameter + d = param_parse(d, params) + except (ValueError, IndexError, KeyError): + outmess( + ('analyzevars: could not parse dimension for ' + f'variable {d!r}\n') + ) + + dim_char = ':' if d == ':' else '*' + if d == dim_char: + dl = [dim_char] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and dl[0] != dim_char: + dl = ['1', dl[0]] + if len(dl) == 2: + d1, d2 = map(symbolic.Expr.parse, dl) + dsize = d2 - d1 + 1 + d = dsize.tostring(language=symbolic.Language.C) + # find variables v that define d as a linear + # function, `d == a * v + b`, and store + # coefficients a and b for further analysis. 
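# e.g. for dimension(n) the size expression is n itself, giving + # a == 1 and b == 0, so n can later be recovered from the runtime + # shape of the array argument: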
+ solver_and_deps = {} + for v in block['vars']: + s = symbolic.as_symbol(v) + if dsize.contains(s): + try: + a, b = dsize.linear_solve(s) + + def solve_v(s, a=a, b=b): + return (s - b) / a + + all_symbols = set(a.symbols()) + all_symbols.update(b.symbols()) + except RuntimeError as msg: + # d is not a linear function of v, + # however, if v can be determined + # from d using other means, + # implement the corresponding + # solve_v function here. + solve_v = None + all_symbols = set(dsize.symbols()) + v_deps = set( + s.data for s in all_symbols + if s.data in vars) + solver_and_deps[v] = solve_v, list(v_deps) + # Note that dsize may contain symbols that are + # not defined in block['vars']. Here we assume + # these correspond to Fortran/C intrinsic + # functions or that are defined by other + # means. We'll let the compiler validate the + # definiteness of such symbols. + dimension_exprs[d] = solver_and_deps + vars[n]['dimension'].append(d) + + if 'check' not in vars[n] and 'args' in block and n in block['args']: + # n is an argument that has no checks defined. Here we + # generate some consistency checks for n, and when n is an + # array, generate checks for its dimensions and construct + # initialization expressions. + n_deps = vars[n].get('depend', []) + n_checks = [] + n_is_input = l_or(isintent_in, isintent_inout, + isintent_inplace)(vars[n]) + if isarray(vars[n]): # n is array + for i, d in enumerate(vars[n]['dimension']): + coeffs_and_deps = dimension_exprs.get(d) + if coeffs_and_deps is None: + # d is `:` or `*` or a constant expression + pass + elif n_is_input: + # n is an input array argument and its shape + # may define variables used in dimension + # specifications. + for v, (solver, deps) in coeffs_and_deps.items(): + def compute_deps(v, deps): + for v1 in coeffs_and_deps.get(v, [None, []])[1]: + if v1 not in deps: + deps.add(v1) + compute_deps(v1, deps) + all_deps = set() + compute_deps(v, all_deps) + if ((v in n_deps + or '=' in vars[v] + or 'depend' in vars[v])): + # Skip a variable that + # - n depends on + # - has user-defined initialization expression + # - has user-defined dependencies + continue + if solver is not None and v not in all_deps: + # v can be solved from d, hence, we + # make it an optional argument with + # initialization expression: + is_required = False + init = solver(symbolic.as_symbol( + f'shape({n}, {i})')) + init = init.tostring( + language=symbolic.Language.C) + vars[v]['='] = init + # n needs to be initialized before v. So, + # making v dependent on n and on any + # variables in solver or d. 
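# e.g. for an argument x with dimension(m) and intent(in), m just + # received the default value shape(x, 0) and is now made to depend + # on x: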
+ vars[v]['depend'] = [n] + deps + if 'check' not in vars[v]: + # add check only when no + # user-specified checks exist + vars[v]['check'] = [ + f'shape({n}, {i}) == {d}'] + else: + # d is a non-linear function on v, + # hence, v must be a required input + # argument that n will depend on + is_required = True + if 'intent' not in vars[v]: + vars[v]['intent'] = [] + if 'in' not in vars[v]['intent']: + vars[v]['intent'].append('in') + # v needs to be initialized before n + n_deps.append(v) + n_checks.append( + f'shape({n}, {i}) == {d}') + v_attr = vars[v].get('attrspec', []) + if not ('optional' in v_attr + or 'required' in v_attr): + v_attr.append( + 'required' if is_required else 'optional') + if v_attr: + vars[v]['attrspec'] = v_attr + if coeffs_and_deps is not None: + # extend v dependencies with ones specified in attrspec + for v, (solver, deps) in coeffs_and_deps.items(): + v_deps = vars[v].get('depend', []) + for aa in vars[v].get('attrspec', []): + if aa.startswith('depend'): + aa = ''.join(aa.split()) + v_deps.extend(aa[7:-1].split(',')) + if v_deps: + vars[v]['depend'] = list(set(v_deps)) + if n not in v_deps: + n_deps.append(v) + elif isstring(vars[n]): + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + if n_checks: + vars[n]['check'] = n_checks + if n_deps: + vars[n]['depend'] = list(set(n_deps)) + + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + try: + if block['result']: + vars[block['result']]['typespec'] = typespec + except Exception: + pass + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) + if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + 
block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def param_eval(v, g_params, params, dimspec=None): + """ + Creates a dictionary of indices and values for each parameter in a + parameter array to be evaluated later. + + WARNING: It is not possible to initialize multidimensional array + parameters e.g. dimension(-3:1, 4, 3:5) at this point. This is because in + Fortran initialization through array constructor requires the RESHAPE + intrinsic function. Since the right-hand side of the parameter declaration + is not executed in f2py, but rather at the compiled c/fortran extension, + later, it is not possible to execute a reshape of a parameter array. + One issue remains: if the user wants to access the array parameter from + python, we should either + 1) allow them to access the parameter array using python standard indexing + (which is often incompatible with the original fortran indexing) + 2) allow the parameter array to be accessed in python as a dictionary with + fortran indices as keys + We are choosing 2 for now. + """ + if dimspec is None: + try: + p = eval(v, g_params, params) + except Exception as msg: + p = v + outmess(f'param_eval: got "{msg}" on {v!r}\n') + return p + + # This is an array parameter. + # First, we parse the dimension information + if len(dimspec) < 2 or dimspec[::len(dimspec)-1] != "()": + raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') + dimrange = dimspec[1:-1].split(',') + if len(dimrange) == 1: + # e.g. dimension(2) or dimension(-1:1) + dimrange = dimrange[0].split(':') + # now, dimrange is a list of 1 or 2 elements + if len(dimrange) == 1: + bound = param_parse(dimrange[0], params) + dimrange = range(1, int(bound)+1) + else: + lbound = param_parse(dimrange[0], params) + ubound = param_parse(dimrange[1], params) + dimrange = range(int(lbound), int(ubound)+1) + else: + raise ValueError(f'param_eval: multidimensional array parameters ' + f'{dimspec} not supported') + + # Parse parameter value + v = (v[2:-2] if v.startswith('(/') else v).split(',') + v_eval = [] + for item in v: + try: + item = eval(item, g_params, params) + except Exception as msg: + outmess(f'param_eval: got "{msg}" on {item!r}\n') + v_eval.append(item) + + p = dict(zip(dimrange, v_eval)) + + return p + + +def param_parse(d, params): + """Recursively parse array dimensions. + + Parses the declaration of an array variable or parameter + `dimension` keyword, and is called recursively if the + dimension for this array is a previously defined parameter + (found in `params`). + + Parameters + ---------- + d : str + Fortran expression describing the dimension of an array. + params : dict + Previously parsed parameters declared in the Fortran source file.
+ + Returns + ------- + out : str + Parsed dimension expression. + + Examples + -------- + + * If the line being analyzed is + + `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)` + + then `d = '2'` and we return immediately, with + + >>> d = '2' + >>> param_parse(d, params) + '2' + + * If the line being analyzed is + + `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)` + + then `d = 'pa'`; since `pa` is a previously parsed parameter, + and `pa = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa' + >>> params = {'pa': 3} + >>> param_parse(d, params) + '3' + + * If the line being analyzed is + + `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)` + + then `d = 'pa(1)'`; since `pa` is a previously parsed parameter, + and `pa(1) = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa(1)' + >>> params = dict(pa={1: 3, 2: 5}) + >>> param_parse(d, params) + '3' + """ + if "(" in d: + # this dimension expression is an array + dname = d[:d.find("(")] + ddims = d[d.find("(")+1:d.rfind(")")] + # this dimension expression is also a parameter; + # parse it recursively + index = int(param_parse(ddims, params)) + return str(params[dname][index]) + elif d in params: + return str(params[d]) + else: + for p in params: + re_1 = re.compile( + r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I + ) + m = re_1.match(d) + while m: + d = m.group('before') + \ + str(params[p]) + m.group('after') + m = re_1.match(d) + return d + + +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules = buildimplicitrules(block) + at = determineexprtype(a, block['vars'], implicitrules) + na = 'e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase + string.digits: + c = '_' + na = na + c + if na[-1] == '_': + na = na + 'e' + else: + na = na + '_e' + a = na + while a in block['vars'] or a in block['args']: + a = a + 'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a] = at + else: + if a not in block['vars']: + if orig_a in block['vars']: + block['vars'][a] = block['vars'][orig_a] + else: + block['vars'][a] = {} + if 'externals' in block and orig_a in block['externals'] + block['interfaced']: + block['vars'][a] = setattrspec(block['vars'][a], 'external') + return a + + +def analyzeargs(block): + setmesstext(block) + implicitrules, _ = buildimplicitrules(block) + if 'args' not in block: + block['args'] = [] + args = [] + for a in block['args']: + a = expr2name(a, block, args) + args.append(a) + block['args'] = args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a] = {} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals'] = [] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']] = {} + return block + +determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>\w+)|)\Z', re.I) +determineexprtype_re_3 = re.compile( + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>\w+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I) + + +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec': 'integer'} + if isinstance(r, float): + return
{'typespec': 'real'} + if isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = '(%s)' % ','.join(argsl) + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = ' result (%s)' % block['result'] + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = '! 
in %s' % block['from'] + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = '%s%sentry %s(%s)' \ + % (entry_stmts, tab + tabchar, k, ','.join(i)) + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) + else: + ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = '%s%suse %s,' % (ret, tab, m) + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = '%s only:' % (ret) + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = '%s%s%s' % (ret, c, k) + c = ',' + else: + ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + f = globals()['isintent_%s' % intent] + except KeyError: + pass + else: + if f(var): + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = '%s%sintent(callback) %s' % (ret, tab, a) + ret = '%s%sexternal %s' % (ret, tab, a) + if isoptional(vars[a]): + ret = '%s%soptional %s' % (ret, tab, a) + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess('vars2fortran: No definition for argument "%s".\n' % a) + continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = '%s%sexternal %s' % (ret, tab, a) + continue + show(vars[a]) + outmess('vars2fortran: No typespec for argument "%s".\n' % a) + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + 
selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + c = ' ' + if 'attrspec' in vars[a]: + attr = [l for l in vars[a]['attrspec'] + if l not in ['external']] + if as_interface and 'intent(in)' in attr and 'intent(out)' in attr: + # In Fortran, intent(in, out) are conflicting while + # intent(in, out) can be specified only via + # `!f2py intent(out) ..`. + # So, for the Fortran interface, we'll drop + # intent(out) to resolve the conflict. + attr.remove('intent(out)') + if attr: + vardef = '%s, %s' % (vardef, ','.join(attr)) + c = ',' + if 'dimension' in vars[a]: + vardef = '%s%sdimension(%s)' % ( + vardef, c, ','.join(vars[a]['dimension'])) + c = ',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) + c = ',' + if 'check' in vars[a]: + vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) + c = ',' + if 'depend' in vars[a]: + vardef = '%s%sdepend(%s)' % ( + vardef, c, ','.join(vars[a]['depend'])) + c = ',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = '(%s,%s)' % (v.real, v.imag) + except Exception: + pass + vardef = '%s :: %s=%s' % (vardef, a, v) + else: + vardef = '%s :: %s' % (vardef, a) + ret = '%s%s%s' % (ret, tab, vardef) + return ret +###### + + +# We expose post_processing_hooks as global variable so that +# user-libraries could register their own hooks to f2py. +post_processing_hooks = [] + + +def crackfortran(files): + global usermodules, post_processing_hooks + + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules = [] + postlist = postcrack(grouplist[0]) + outmess('Applying post-processing hooks...\n', 0) + for hook in post_processing_hooks: + outmess(f' {hook.__name__}\n', 0) + postlist = traverse(postlist, hook) + outmess('Post-processing (stage 2)...\n', 0) + postlist = postcrack2(postlist) + return usermodules + postlist + + +def crack2fortran(block): + global f2py_version + + pyf = crack2fortrangen(block) + '\n' + header = """! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer = """ +! This file was auto-generated with f2py (version:%s). +! See: +! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +""" % (f2py_version) + return header + pyf + footer + + +def _is_visit_pair(obj): + return (isinstance(obj, tuple) + and len(obj) == 2 + and isinstance(obj[0], (int, str))) + + +def traverse(obj, visit, parents=[], result=None, *args, **kwargs): + '''Traverse f2py data structure with the following visit function: + + def visit(item, parents, result, *args, **kwargs): + """ + + parents is a list of key-"f2py data structure" pairs from which + items are taken from. + + result is a f2py data structure that is filled with the + return value of the visit function. 
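+
+        (For a concrete example of such a visit function, see
+        character_backward_compatibility_hook later in this file.)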
+ + item is 2-tuple (index, value) if parents[-1][1] is a list + item is 2-tuple (key, value) if parents[-1][1] is a dict + + The return value of visit must be None, or of the same kind as + item, that is, if parents[-1] is a list, the return value must + be 2-tuple (new_index, new_value), or if parents[-1] is a + dict, the return value must be 2-tuple (new_key, new_value). + + If new_index or new_value is None, the return value of visit + is ignored, that is, it will not be added to the result. + + If the return value is None, the content of obj will be + traversed, otherwise not. + """ + ''' + + if _is_visit_pair(obj): + if obj[0] == 'parent_block': + # avoid infinite recursion + return obj + new_result = visit(obj, parents, result, *args, **kwargs) + if new_result is not None: + assert _is_visit_pair(new_result) + return new_result + parent = obj + result_key, obj = obj + else: + parent = (None, obj) + result_key = None + + if isinstance(obj, list): + new_result = [] + for index, value in enumerate(obj): + new_index, new_item = traverse((index, value), visit, + parents=parents + [parent], + result=result, *args, **kwargs) + if new_index is not None: + new_result.append(new_item) + elif isinstance(obj, dict): + new_result = dict() + for key, value in obj.items(): + new_key, new_value = traverse((key, value), visit, + parents=parents + [parent], + result=result, *args, **kwargs) + if new_key is not None: + new_result[new_key] = new_value + else: + new_result = obj + + if result_key is None: + return new_result + return result_key, new_result + + +def character_backward_compatibility_hook(item, parents, result, + *args, **kwargs): + """Previously, Fortran character was incorrectly treated as + character*1. This hook fixes the usage of the corresponding + variables in `check`, `dimension`, `=`, and `callstatement` + expressions. + + The usage of `char*` in `callprotoargument` expression can be left + unchanged because C `character` is C typedef of `char`, although, + new implementations should use `character*` in the corresponding + expressions. + + See https://github.com/numpy/numpy/pull/19388 for more information. + + """ + parent_key, parent_value = parents[-1] + key, value = item + + def fix_usage(varname, value): + value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value) + value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]', + varname, value) + return value + + if parent_key in ['dimension', 'check']: + assert parents[-3][0] == 'vars' + vars_dict = parents[-3][1] + elif key == '=': + assert parents[-2][0] == 'vars' + vars_dict = parents[-2][1] + else: + vars_dict = None + + new_value = None + if vars_dict is not None: + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + new_value = fix_usage(varname, new_value) + elif key == 'callstatement': + vars_dict = parents[-2][1]['vars'] + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + # replace all occurrences of `` with + # `&` in argument passing + new_value = re.sub( + r'(? 
`{new_value}`\n', 1) + return (key, new_value) + + +post_processing_hooks.append(character_backward_compatibility_hook) + + +if __name__ == "__main__": + files = [] + funcs = [] + f = 1 + f2 = 0 + f3 = 0 + showblocklist = 0 + for l in sys.argv[1:]: + if l == '': + pass + elif l[0] == ':': + f = 0 + elif l == '-quiet': + quiet = 1 + verbose = 0 + elif l == '-verbose': + verbose = 2 + quiet = 0 + elif l == '-fix': + if strictf77: + outmess( + 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) + skipemptyends = 1 + sourcecodeform = 'fix' + elif l == '-skipemptyends': + skipemptyends = 1 + elif l == '--ignore-contains': + ignorecontains = 1 + elif l == '-f77': + strictf77 = 1 + sourcecodeform = 'fix' + elif l == '-f90': + strictf77 = 0 + sourcecodeform = 'free' + skipemptyends = 1 + elif l == '-h': + f2 = 1 + elif l == '-show': + showblocklist = 1 + elif l == '-m': + f3 = 1 + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + elif f2: + f2 = 0 + pyffilename = l + elif f3: + f3 = 0 + f77modulename = l + elif f: + try: + open(l).close() + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}\n') + else: + funcs.append(l) + if not strictf77 and f77modulename and not skipemptyends: + outmess("""\ + Warning: You have specified module name for non Fortran 77 code that + should not need one (expect if you are scanning F90 code for non + module blocks but then you should use flag -skipemptyends and also + be sure that the files do not contain programs without program + statement). +""", 0) + + postlist = crackfortran(files) + if pyffilename: + outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) + pyf = crack2fortran(postlist) + with open(pyffilename, 'w') as f: + f.write(pyf) + if showblocklist: + show(postlist) diff --git a/phivenv/Lib/site-packages/numpy/f2py/diagnose.py b/phivenv/Lib/site-packages/numpy/f2py/diagnose.py new file mode 100644 index 0000000000000000000000000000000000000000..a42282b1747cf51a635cf82ee113d2d82c8b2d8f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/diagnose.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +import os +import sys +import tempfile + + +def run_command(cmd): + print('Running %r:' % (cmd)) + os.system(cmd) + print('------') + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print('os.name=%r' % (os.name)) + print('------') + print('sys.platform=%r' % (sys.platform)) + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print('sys.path=%r' % (':'.join(sys.path))) + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError as e: + print('Failed to import new numpy:', e) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError as e: + print('Failed to import f2py2e:', e) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print('Found new numpy version %r in %s' % + (numpy.__version__, numpy.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if 
has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo + print('ok') + print('------') + cpu = cpuinfo() + print('CPU information:', end=' ') + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): + print(name[1:], end=' ') + print('------') + except Exception as msg: + print('error:', msg) + print('------') + os.chdir(_path) +if __name__ == "__main__": + run() diff --git a/phivenv/Lib/site-packages/numpy/f2py/f2py2e.py b/phivenv/Lib/site-packages/numpy/f2py/f2py2e.py new file mode 100644 index 0000000000000000000000000000000000000000..582c81330706b8cb5c41be3bf210e10d99022bad --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/f2py2e.py @@ -0,0 +1,772 @@ +#!/usr/bin/env python3 +""" + +f2py2e - Fortran to Python C/API generator. 2nd Edition. + See __usage__ below. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import sys +import os +import pprint +import re +from pathlib import Path +from itertools import dropwhile +import argparse +import copy + +from . import crackfortran +from . import rules +from . import cb_rules +from . import auxfuncs +from . import cfuncs +from . import f90mod_rules +from . import __version__ +from . 
import capi_maps +from .cfuncs import errmess +from numpy.f2py._backends import f2py_build_generator + +f2py_version = __version__.version +numpy_version = __version__.version + +# outmess=sys.stdout.write +show = pprint.pprint +outmess = auxfuncs.outmess +MESON_ONLY_VER = (sys.version_info >= (3, 12)) + +__usage__ =\ +f"""Usage: + +1) To construct extension module sources: + + f2py [] [[[only:]||[skip:]] \\ + ] \\ + [: ...] + +2) To compile fortran files and build extension modules: + + f2py -c [, , ] + +3) To generate signature files: + + f2py -h ...< same options as in (1) > + +Description: This program generates a Python C/API file (module.c) + that contains wrappers for given fortran functions so that they + can be called from Python. With the -c option the corresponding + extension modules are built. + +Options: + + -h Write signatures of the fortran routines to file + and exit. You can then edit and use it instead + of . If ==stdout then the + signatures are printed to stdout. + Names of fortran routines for which Python C/API + functions will be generated. Default is all that are found + in . + Paths to fortran/signature files that will be scanned for + in order to determine their signatures. + skip: Ignore fortran functions that follow until `:'. + only: Use only fortran functions that follow until `:'. + : Get back to mode. + + -m Name of the module; f2py generates a Python/C API + file module.c or extension module . + Default is 'untitled'. + + '-include

<header>' Writes additional headers in the C wrapper, can be passed + multiple times, generates #include <header>
each time. + + --[no-]lower Do [not] lower the cases in . By default, + --lower is assumed with -h key, and --no-lower without -h key. + + --build-dir All f2py generated files are created in . + Default is tempfile.mkdtemp(). + + --overwrite-signature Overwrite existing signature file. + + --[no-]latex-doc Create (or not) module.tex. + Default is --no-latex-doc. + --short-latex Create 'incomplete' LaTeX document (without commands + \\documentclass, \\tableofcontents, and \\begin{{document}}, + \\end{{document}}). + + --[no-]rest-doc Create (or not) module.rst. + Default is --no-rest-doc. + + --debug-capi Create C/API code that reports the state of the wrappers + during runtime. Useful for debugging. + + --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 + functions. --wrap-functions is default because it ensures + maximum portability/compiler independence. + + --include-paths ::... Search include files from the given + directories. + + --help-link [..] List system resources found by system_info.py. See also + --link- switch below. [..] is optional list + of resources names. E.g. try 'f2py --help-link lapack_opt'. + + --f2cmap Load Fortran-to-Python KIND specification from the given + file. Default: .f2py_f2cmap in current directory. + + --quiet Run quietly. + --verbose Run with extra verbosity. + --skip-empty-wrappers Only generate wrapper files when needed. + -v Print f2py version ID and exit. + + +build backend options (only effective with -c) +[NO_MESON] is used to indicate an option not meant to be used +with the meson backend or above Python 3.12: + + --fcompiler= Specify Fortran compiler type by vendor [NO_MESON] + --compiler= Specify distutils C compiler type [NO_MESON] + + --help-fcompiler List available Fortran compilers and exit [NO_MESON] + --f77exec= Specify the path to F77 compiler [NO_MESON] + --f90exec= Specify the path to F90 compiler [NO_MESON] + --f77flags= Specify F77 compiler flags + --f90flags= Specify F90 compiler flags + --opt= Specify optimization flags [NO_MESON] + --arch= Specify architecture specific optimization flags [NO_MESON] + --noopt Compile without optimization [NO_MESON] + --noarch Compile without arch-dependent optimization [NO_MESON] + --debug Compile with debugging information + + --dep + Specify a meson dependency for the module. This may + be passed multiple times for multiple dependencies. + Dependencies are stored in a list for further processing. + + Example: --dep lapack --dep scalapack + This will identify "lapack" and "scalapack" as dependencies + and remove them from argv, leaving a dependencies list + containing ["lapack", "scalapack"]. + + --backend + Specify the build backend for the compilation process. + The supported backends are 'meson' and 'distutils'. + If not specified, defaults to 'distutils'. On + Python 3.12 or higher, the default is 'meson'. + +Extra options (only effective with -c): + + --link- Link extension module with as defined + by numpy.distutils/system_info.py. E.g. to link + with optimized LAPACK libraries (vecLib on MacOSX, + ATLAS elsewhere), use --link-lapack_opt. + See also --help-link switch. [NO_MESON] + + -L/path/to/lib/ -l + -D -U + -I/path/to/include/ + .o .so .a + + Using the following macros may be required with non-gcc Fortran + compilers: + -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN + + When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY + interface is printed out at exit (platforms: Linux). 
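+
+  E.g. (an illustrative invocation; the module and file names are
+  placeholders): f2py -c -DF2PY_REPORT_ATEXIT -m mymod mysrc.f90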
+ + When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is + sent to stderr whenever F2PY interface makes a copy of an + array. Integer sets the threshold for array sizes when + a message should be shown. + +Version: {f2py_version} +numpy Version: {numpy_version} +License: NumPy license (see LICENSE.txt in the NumPy source code) +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +https://numpy.org/doc/stable/f2py/index.html\n""" + + +def scaninputline(inputline): + files, skipfuncs, onlyfuncs, debug = [], [], [], [] + f, f2, f3, f5, f6, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0 + verbose = 1 + emptygen = True + dolc = -1 + dolatexdoc = 0 + dorestdoc = 0 + wrapfuncs = 1 + buildpath = '.' + include_paths, inputline = get_includes(inputline) + signsfile, modulename = None, None + options = {'buildpath': buildpath, + 'coutput': None, + 'f2py_wrapper_output': None} + for l in inputline: + if l == '': + pass + elif l == 'only:': + f = 0 + elif l == 'skip:': + f = -1 + elif l == ':': + f = 1 + elif l[:8] == '--debug-': + debug.append(l[8:]) + elif l == '--lower': + dolc = 1 + elif l == '--build-dir': + f6 = 1 + elif l == '--no-lower': + dolc = 0 + elif l == '--quiet': + verbose = 0 + elif l == '--verbose': + verbose += 1 + elif l == '--latex-doc': + dolatexdoc = 1 + elif l == '--no-latex-doc': + dolatexdoc = 0 + elif l == '--rest-doc': + dorestdoc = 1 + elif l == '--no-rest-doc': + dorestdoc = 0 + elif l == '--wrap-functions': + wrapfuncs = 1 + elif l == '--no-wrap-functions': + wrapfuncs = 0 + elif l == '--short-latex': + options['shortlatex'] = 1 + elif l == '--coutput': + f8 = 1 + elif l == '--f2py-wrapper-output': + f9 = 1 + elif l == '--f2cmap': + f10 = 1 + elif l == '--overwrite-signature': + options['h-overwrite'] = 1 + elif l == '-h': + f2 = 1 + elif l == '-m': + f3 = 1 + elif l[:2] == '-v': + print(f2py_version) + sys.exit() + elif l == '--show-compilers': + f5 = 1 + elif l[:8] == '-include': + cfuncs.outneeds['userincludes'].append(l[9:-1]) + cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] + elif l == '--skip-empty-wrappers': + emptygen = False + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + sys.exit() + elif f2: + f2 = 0 + signsfile = l + elif f3: + f3 = 0 + modulename = l + elif f6: + f6 = 0 + buildpath = l + elif f8: + f8 = 0 + options["coutput"] = l + elif f9: + f9 = 0 + options["f2py_wrapper_output"] = l + elif f10: + f10 = 0 + options["f2cmap_file"] = l + elif f == 1: + try: + with open(l): + pass + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n') + elif f == -1: + skipfuncs.append(l) + elif f == 0: + onlyfuncs.append(l) + if not f5 and not files and not modulename: + print(__usage__) + sys.exit() + if not os.path.isdir(buildpath): + if not verbose: + outmess('Creating build directory %s\n' % (buildpath)) + os.mkdir(buildpath) + if signsfile: + signsfile = os.path.join(buildpath, signsfile) + if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: + errmess( + 'Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n' % (signsfile)) + sys.exit() + + options['emptygen'] = emptygen + options['debug'] = debug + options['verbose'] = verbose + if dolc == -1 and not signsfile: + options['do-lower'] = 0 + else: + options['do-lower'] = dolc + if modulename: + options['module'] = modulename + if signsfile: + options['signsfile'] = signsfile + if onlyfuncs: + options['onlyfuncs'] = onlyfuncs + if skipfuncs: + options['skipfuncs'] = skipfuncs + options['dolatexdoc'] = dolatexdoc + options['dorestdoc'] = dorestdoc + options['wrapfuncs'] = wrapfuncs + options['buildpath'] = buildpath + options['include_paths'] = include_paths + options.setdefault('f2cmap_file', None) + return files, options + + +def callcrackfortran(files, options): + rules.options = options + crackfortran.debug = options['debug'] + crackfortran.verbose = options['verbose'] + if 'module' in options: + crackfortran.f77modulename = options['module'] + if 'skipfuncs' in options: + crackfortran.skipfuncs = options['skipfuncs'] + if 'onlyfuncs' in options: + crackfortran.onlyfuncs = options['onlyfuncs'] + crackfortran.include_paths[:] = options['include_paths'] + crackfortran.dolowercase = options['do-lower'] + postlist = crackfortran.crackfortran(files) + if 'signsfile' in options: + outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) + pyf = crackfortran.crack2fortran(postlist) + if options['signsfile'][-6:] == 'stdout': + sys.stdout.write(pyf) + else: + with open(options['signsfile'], 'w') as f: + f.write(pyf) + if options["coutput"] is None: + for mod in postlist: + mod["coutput"] = "%smodule.c" % mod["name"] + else: + for mod in postlist: + mod["coutput"] = options["coutput"] + if options["f2py_wrapper_output"] is None: + for mod in postlist: + mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] + else: + for mod in postlist: + mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + return postlist + + +def buildmodules(lst): + cfuncs.buildcfuncs() + outmess('Building modules...\n') + modules, mnames, isusedby = [], [], {} + for item in lst: + if '__user__' in item['name']: + cb_rules.buildcallbacks(item) + else: + if 'use' in item: + for u in item['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(item['name']) + modules.append(item) + mnames.append(item['name']) + ret = {} + for module, name in zip(modules, mnames): + if name in isusedby: + outmess('\tSkipping module "%s" which is used by %s.\n' % ( + name, ','.join('"%s"' % s for s in isusedby[name]))) + else: + um = [] + if 'use' in module: + for u in module['use'].keys(): + if u in isusedby and u in mnames: + um.append(modules[mnames.index(u)]) + else: + outmess( + f'\tModule "{name}" uses nonexisting "{u}" ' + 'which will be ignored.\n') + ret[name] = {} + dict_append(ret[name], rules.buildmodule(module, um)) + return ret + + +def dict_append(d_out, d_in): + for (k, v) in d_in.items(): + if k not in d_out: + d_out[k] = [] + if isinstance(v, list): + d_out[k] = d_out[k] + v + else: + d_out[k].append(v) + + +def run_main(comline_list): + """ + Equivalent to running:: + + f2py + + where ``=string.join(,' ')``, but in Python. Unless + ``-h`` is used, this function returns a dictionary containing + information on generated modules and their dependencies on source + files. + + You cannot build extension modules with this function, that is, + using ``-c`` is not allowed. Use the ``compile`` command instead. 
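+
+    A minimal programmatic sketch (the file name is a placeholder):
+    ``run_main(['-m', 'scalar', 'scalar.f'])`` cracks ``scalar.f``,
+    writes ``scalarmodule.c``, and returns the build-info dictionary
+    described above.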
+ + Examples + -------- + The command ``f2py -m scalar scalar.f`` can be executed from Python as + follows. + + .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat + :language: python + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + # gh-22819 -- begin + parser = make_f2py_compile_parser() + args, comline_list = parser.parse_known_args(comline_list) + pyf_files, _ = filter_files("", "[.]pyf([.]src|)", comline_list) + # Checks that no existing modulename is defined in a pyf file + # TODO: Remove all this when scaninputline is replaced + if args.module_name: + if "-h" in comline_list: + modname = ( + args.module_name + ) # Directly use from args when -h is present + else: + modname = validate_modulename( + pyf_files, args.module_name + ) # Validate modname when -h is not present + comline_list += ['-m', modname] # needed for the rest of scaninputline + # gh-22819 -- end + files, options = scaninputline(comline_list) + auxfuncs.options = options + capi_maps.load_f2cmap_file(options['f2cmap_file']) + postlist = callcrackfortran(files, options) + isusedby = {} + for plist in postlist: + if 'use' in plist: + for u in plist['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(plist['name']) + for plist in postlist: + if plist['block'] == 'python module' and '__user__' in plist['name']: + if plist['name'] in isusedby: + # if not quiet: + outmess( + f'Skipping Makefile build for module "{plist["name"]}" ' + 'which is used by {}\n'.format( + ','.join(f'"{s}"' for s in isusedby[plist['name']]))) + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess('%s %s\n' % + (os.path.basename(sys.argv[0]), options['signsfile'])) + return + for plist in postlist: + if plist['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(plist['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. 
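+
+    For example (illustrative):
+
+    >>> filter_files('', '[.]pyf([.]src|)', ['a.pyf', 'b.f90'])
+    (['a.pyf'], ['b.f90'])
+    >>> filter_files('-l', '', ['-lblas', 'x.f'], remove_prefix=1)
+    (['blas'], ['x.f'])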
+ """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +class CombineIncludePaths(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + include_paths_set = set(getattr(namespace, 'include_paths', []) or []) + if option_string == "--include_paths": + outmess("Use --include-paths or -I instead of --include_paths which will be removed") + if option_string == "--include-paths" or option_string == "--include_paths": + include_paths_set.update(values.split(':')) + else: + include_paths_set.add(values) + setattr(namespace, 'include_paths', list(include_paths_set)) + +def include_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + return parser + +def get_includes(iline): + iline = (' '.join(iline)).split() + parser = include_parser() + args, remain = parser.parse_known_args(iline) + ipaths = args.include_paths + if args.include_paths is None: + ipaths = [] + return ipaths, remain + +def make_f2py_compile_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--dep", action="append", dest="dependencies") + parser.add_argument("--backend", choices=['meson', 'distutils'], default='distutils') + parser.add_argument("-m", dest="module_name") + return parser + +def preparse_sysargv(): + # To keep backwards bug compatibility, newer flags are handled by argparse, + # and `sys.argv` is passed to the rest of `f2py` as is. + parser = make_f2py_compile_parser() + + args, remaining_argv = parser.parse_known_args() + sys.argv = [sys.argv[0]] + remaining_argv + + backend_key = args.backend + if MESON_ONLY_VER and backend_key == 'distutils': + outmess("Cannot use distutils backend with Python>=3.12," + " using meson backend instead.\n") + backend_key = "meson" + + return { + "dependencies": args.dependencies or [], + "backend": backend_key, + "modulename": args.module_name, + } + +def run_compile(): + """ + Do it all in one call! 
+ """ + import tempfile + + # Collect dependency flags, preprocess sys.argv + argy = preparse_sysargv() + modulename = argy["modulename"] + if modulename is None: + modulename = 'untitled' + dependencies = argy["dependencies"] + backend_key = argy["backend"] + build_backend = f2py_build_generator(backend_key) + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'--link-') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + # TODO: Once distutils is dropped completely, i.e. min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] + + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + if MESON_ONLY_VER or backend_key == 'meson': + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) + else: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print('Unknown vendor: "%s"' % (s[len(v):])) + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'--(verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + # Ugly filter to remove everything but sources + sources = sys.argv[1:] + f2cmapopt = '--f2cmap' + if f2cmapopt in sys.argv: + i = sys.argv.index(f2cmapopt) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + pyf_files, _sources = filter_files("", "[.]pyf([.]src|)", sources) + sources = pyf_files + _sources + 
modulename = validate_modulename(pyf_files, modulename) + extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + # Construct wrappers / signatures / things + if backend_key == 'meson': + if not pyf_files: + outmess('Using meson backend\nWill pass --lower to f2py\nSee https://numpy.org/doc/stable/f2py/buildtools/meson.html\n') + f2py_flags.append('--lower') + run_main(f" {' '.join(f2py_flags)} -m {modulename} {' '.join(sources)}".split()) + else: + run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) + + # Order matters here, includes are needed for run_main above + include_dirs, sources = get_includes(sources) + # Now use the builder + builder = build_backend( + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + {"dependencies": dependencies}, + ) + + builder.compile() + + +def validate_modulename(pyf_files, modulename='untitled'): + if len(pyf_files) > 1: + raise ValueError("Only one .pyf file per call") + if pyf_files: + pyff = pyf_files[0] + pyf_modname = auxfuncs.get_f2py_modulename(pyff) + if modulename != pyf_modname: + outmess( + f"Ignoring -m {modulename}.\n" + f"{pyff} defines {pyf_modname} to be the modulename.\n" + ) + modulename = pyf_modname + return modulename + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + if MESON_ONLY_VER: + outmess("Use --dep for meson builds\n") + else: + from numpy.distutils.system_info import show_all + show_all() + return + + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) diff --git a/phivenv/Lib/site-packages/numpy/f2py/f90mod_rules.py b/phivenv/Lib/site-packages/numpy/f2py/f90mod_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..a841e1bc77877d2fdceba7dafbe3d7e0018a335f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/f90mod_rules.py @@ -0,0 +1,267 @@ +""" +Build F90 module support for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import numpy as np + +from . import capi_maps +from . import func2subr +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. 
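+# Names used below that auxfuncs provides include, e.g., ismodule,
+# hasbody, isallocatable, isroutine, outmess, applyrules and dictappend.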
+from .auxfuncs import * + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. + end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + from . import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = '%s\n %s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + + usenames = getuseblocks(pymod) + for m in findf90modules(pymod): + contains_functions_or_subroutines = any( + item for item in m["body"] if item["block"] in ["function", "subroutine"] + ) + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess('\t\tConstructing F90 module support for "%s"...\n' % + (m['name'])) + if m['name'] in usenames and not contains_functions_or_subroutines: + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") + continue + if onlyvars: + outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + cadd('\t{"%s",%s,{{%s}},%s, %s},' % + (undo_rmbadname1(n), dm['rank'], dms, at, + capi_maps.get_elsize(var))) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + if isallocatable(var): + fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) + efargs.append(fargs[-1]) + sargs.append( + 'void (*%s)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)' % (n)) + sargsp.append('void 
(*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) + fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) + fadd('use %s, only: d => %s\n' % + (m['name'], undo_rmbadname1(n))) + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = range(1, int(dm['rank']) + 1) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd('end subroutine %s' % (fargs[-1])) + else: + fargs.append(n) + sargs.append('char *%s' % (n)) + sargsp.append('char*') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + outmess("f90mod_rules.buildhooks:" + f" skipping {b['block']} {b['name']}\n") + continue + modobjs.append('%s()' % (b['name'])) + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + else: + if wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' + 'f2py_rout_#modulename#_%s_%s,' + 'doc_f2py_rout_#modulename#_%s_%s},') + % (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append('char *%s' % (b['name'])) + sargsp.append('char *') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % + (m['name'], b['name'])) + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) + if mfargs: + for a in undo_rmbadname(mfargs): + fadd('use %s, only : %s' % (m['name'], a)) + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd('external %s' % (a)) + fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) + fadd('end subroutine f2pyinit%s\n' % (m['name'])) + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append('"\t%s --- %s"' % (m['name'], + ','.join(undo_rmbadname(modobjs)))) + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/phivenv/Lib/site-packages/numpy/f2py/func2subr.py b/phivenv/Lib/site-packages/numpy/f2py/func2subr.py new file mode 100644 index 
0000000000000000000000000000000000000000..1c5b9efff57844a10f811c94dc19a7cbe218e432 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/func2subr.py @@ -0,0 +1,323 @@ +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy + +from .auxfuncs import ( + getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, + isintent_out, islogicalfunction, ismoduleroutine, isscalar, + issubroutine, issubroutine_wrap, outmess, show +) + +from ._isocbind import isoc_kindmap + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess('var2fixfortran: No definition for argument "%s".\n' % a) + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess('var2fixfortran: No typespec for argument "%s".\n' % a) + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = '%s(len=*)' % (vardef) + else: + vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) + else: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + + vardef = '%s %s' % (vardef, fa) + if 'dimension' in vars[a]: + vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) + return vardef + +def useiso_c_binding(rout): + useisoc = False + for key, value in rout['vars'].items(): + kind_value = value.get('kindselector', {}).get('kind') + if kind_value in isoc_kindmap: + return True + return useisoc + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = '%sf2pywrap' % (name) + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l_tmpl = var2fixfortran(vars, name, '@@@NAME@@@', f90mode) + if l_tmpl[:13] == 'character*(*)': + if f90mode: + l_tmpl = 'character(len=10)' + l_tmpl[13:] + else: + l_tmpl = 'character*10' + l_tmpl[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '') == '(*)': + charselect['*'] = '10' + + l1 = l_tmpl.replace('@@@NAME@@@', newname) + rl = None 
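+
+    # Illustrative sketch of the generated wrapper (names are examples
+    # only): for a module function `real function f(x)` in module `m`,
+    # the calls below emit roughly
+    #     subroutine f2pywrap_m_f (ff2pywrap, x)
+    #       use m, only : f
+    #       real x
+    #       real ff2pywrap
+    #       ff2pywrap = f(x)
+    #     end subroutine f2pywrap_m_f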
+ + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + # gh-23598 fix warning + # Essentially, this gets called again with modules where the name of the + # function is added to the arguments, which is not required, and removed + sargs = sargs.replace(f"{name}, ", '') + args = [arg for arg in args if arg != name] + rout['args'] = args + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + if useisoc: + add('use iso_c_binding') + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if useisoc: + add('use iso_c_binding') + if not need_interface: + add('external %s' % (fortranname)) + rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l1) + if rl is not None: + add(rl) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) + else: + add('%s = %s(%s)' % (newname, fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if useisoc: + add('use iso_c_binding') + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if useisoc: + add('use iso_c_binding') + if not need_interface: + add('external %s' % (fortranname)) + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, 
f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + add('call %s(%s)' % (fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append('out=%s' % (rname)) + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' + % (name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/phivenv/Lib/site-packages/numpy/f2py/rules.py b/phivenv/Lib/site-packages/numpy/f2py/rules.py new file mode 100644 index 0000000000000000000000000000000000000000..54eef1a7527526beec43192cc4de193d3c6573a0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/rules.py @@ -0,0 +1,1568 @@ +#!/usr/bin/env python3 +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import os, sys +import time +import copy +from pathlib import Path + +# __version__.version is now the same as the NumPy version +from . 
import __version__ + +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, + hascallstatement, hasexternals, hasinitvalue, hasnote, + hasresultnote, isarray, isarrayofstrings, ischaracter, + ischaracterarray, ischaracter_or_characterarray, iscomplex, + iscomplexarray, iscomplexfunction, iscomplexfunction_warn, + isdummyroutine, isexternal, isfunction, isfunction_wrap, isint1, + isint1array, isintent_aux, isintent_c, isintent_callback, + isintent_copy, isintent_hide, isintent_inout, isintent_nothide, + isintent_out, isintent_overwrite, islogical, islong_complex, + islong_double, islong_doublefunction, islong_long, + islong_longfunction, ismoduleroutine, isoptional, isrequired, + isscalar, issigned_long_longarray, isstring, isstringarray, + isstringfunction, issubroutine, isattr_value, + issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, + isunsigned_chararray, isunsigned_long_long, + isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, + l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper +) + +from . import capi_maps +from . import cfuncs +from . import common_rules +from . import use_rules +from . import f90mod_rules +from . import func2subr + +f2py_version = __version__.version +numpy_version = __version__.version + +options = {} +sepdict = {} +# for k in ['need_cfuncs']: sepdict[k]=',' +for k in ['decl', + 'frompyobj', + 'cleanupfrompyobj', + 'topyarr', 'method', + 'pyobjfrom', 'closepyobjfrom', + 'freemem', + 'userincludes', + 'includes0', 'includes', 'typedefs', 'typedefs_generated', + 'cppmacros', 'cfuncs', 'callbacks', + 'latexdoc', + 'restdoc', + 'routine_defs', 'externroutines', + 'initf2pywraphooks', + 'commonhooks', 'initcommonhooks', + 'f90modhooks', 'initf90modhooks']: + sepdict[k] = '\n' + +#################### Rules for C/API module ################# + +generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) +module_rules = { + 'modulebody': """\ +/* File: #modulename#module.c + * This file is auto-generated with f2py (version:#f2py_version#). + * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, + * written by Pearu Peterson <pearu@cens.ioc.ee>. + * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ + * Do not edit this file directly unless you know what you are doing!!! 
+ */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ + +/* Unconditionally included */ +#include <Python.h> +#include <numpy/npy_os.h> + +""" + gentitle("See f2py2e/cfuncs.py: includes") + """ +#includes# +#includes0# + +""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """ +static PyObject *#modulename#_error; +static PyObject *#modulename#_module; + +""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """ +#typedefs# + +""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """ +#typedefs_generated# + +""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """ +#cppmacros# + +""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """ +#cfuncs# + +""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """ +#userincludes# + +""" + gentitle("See f2py2e/capi_rules.py: usercode") + """ +#usercode# + +/* See f2py2e/rules.py */ +#externroutines# + +""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """ +#usercode1# + +""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """ +#callbacks# + +""" + gentitle("See f2py2e/rules.py: buildapi") + """ +#body# + +""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """ +#f90modhooks# + +""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """ + +""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """ +#commonhooks# + +""" + gentitle("See f2py2e/rules.py") + """ + +static FortranDataDef f2py_routine_defs[] = { +#routine_defs# + {NULL} +}; + +static PyMethodDef f2py_module_methods[] = { +#pymethoddef# + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "#modulename#", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_#modulename#(void) { + int i; + PyObject *m,*d, *s, *tmp; + m = #modulename#_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} + d = PyModule_GetDict(m); + s = PyUnicode_FromString(\"#f2py_version#\"); + PyDict_SetItemString(d, \"__version__\", s); + Py_DECREF(s); + s = PyUnicode_FromString( + \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); + PyDict_SetItemString(d, \"__doc__\", s); + Py_DECREF(s); + s = PyUnicode_FromString(\"""" + numpy_version + """\"); + PyDict_SetItemString(d, \"__f2py_numpy_version__\", s); + Py_DECREF(s); + #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); + /* + * Store the error object inside the dict, so that it could get deallocated. + * (in practice, this is a module, so it likely will not and cannot.) + */ + PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); + Py_DECREF(#modulename#_error); + for(i=0;f2py_routine_defs[i].name!=NULL;i++) { + tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); + PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); + Py_DECREF(tmp); + } +#initf2pywraphooks# +#initf90modhooks# +#initcommonhooks# +#interface_usercode# + +#ifdef F2PY_REPORT_ATEXIT + if (! 
PyErr_Occurred()) + on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + return m; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { + PyObject * volatile capi_buildvalue = NULL; + volatile int f2py_success = 1; +#decl# + static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif + if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ + \"#argformat#|#keyformat##xaformat#:#pyname#\",\\ + capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ + if (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ + CFUNCSMESS(\"Building return value.\\n\"); + capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# + } /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# + if (capi_buildvalue == NULL) { +#routdebugfailure# + } else { +#routdebugleave# + } + CFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif + return capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '" #docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 
'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': { + l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; + /*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: """ }"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' 
{\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + 
], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': { + l_and(l_not(l_or(ismoduleroutine, isintent_c)), + l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + '(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'}, + {iscomplexfunction: + ' PyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; +/* #name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' #name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {l_and(debugcapi, iscomplexfunction) + : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': [' #ctype# #name#_return_value = NULL;', + ' int #name#_return_value_len = 0;'], + 'callfortran':'#name#_return_value,#name#_return_value_len,', + 'callfortranroutine':[' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(' + + '#name#_return_value_len+1)) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN + (*f2py_func)(#callcompaqfortran#); +#else + 
(*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: + ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + ' } /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': ' STRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + isint1: 'signed_char', + ischaracter_or_characterarray: 'character', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing auxiliary variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ], + 'need':['len..'], + '_check':isstring + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ], + 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': l_or(isunsigned_chararray, isunsigned_char), + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + 
'_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };', + ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;', + ' PyTupleObject *#varname#_xa_capi = NULL;', + {l_not(isintent_callback): + ' #cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_cb.capi'}, + 'keys_capi': {isoptional: ',&#varname#_cb.capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check':isexternal + }, + { + 'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_cb.capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_cb.capi==Py_None) { + #varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_cb.capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) { + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + } + else { + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } + if (#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert 
#modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_cb.capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ + if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) { +""", + {debugcapi: ["""\ + fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", + {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ + CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""", + ], + 'cleanupfrompyobj': + """\ + CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr); + Py_DECREF(#varname#_cb.args_capi); + }""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check':isexternal, + '_depend':'' + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {l_or(isintent_c, isattr_value): '#varname#,', l_not(l_or(isintent_c, isattr_value)): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), l_not(isstring), + isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... 
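+ # (no init value and not optional: the required argument is converted unconditionally)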
+ # from_pyobj(varname) + # + {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ + f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); + if (f2py_success) {'''}, + {islogical: '''\ + #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); + f2py_success = 1; + if (f2py_success) {'''}, + ], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : ' if (#varname#_capi != Py_None)'}, + ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {'], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ' PyObject *#varname#_capi = Py_None;'], + 'callfortran':'#varname#,', + 'callfortranappend':'slen(#varname#),', + 'pyobjfrom':[ + {debugcapi: + ' fprintf(stderr,' + '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + # The trailing null value for Fortran is blank. 
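+ # STRINGPADN below swaps those trailing blanks back to NULs before the intent(out) string is returned to Python (see STRINGPADN in cfuncs.py).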
+ {l_and(isintent_out, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + ], + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..', + {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], + '_check': isstring + }, { # Common + 'frompyobj': [ + """\ + slen(#varname#) = #elsize#; + f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" +"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" +"""`#varname#\' of #pyname# to C #ctype#\"); + if (f2py_success) {""", + # The trailing null value for Fortran is blank. + {l_not(isintent_c): + " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, + ], + 'cleanupfrompyobj': """\ + STRINGFREE(#varname#); + } /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', + {l_not(isintent_c): 'STRINGPADN'}], + '_check':isstring, + '_depend':'' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': [ + {l_and(isintent_inout, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + {isintent_inout: '''\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, + slen(#varname#)); + if (f2py_success) {'''}], + 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#', + l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ' PyArrayObject *capi_#varname#_as_array = NULL;', + ' int capi_#varname#_intent = 0;', + {isstringarray: ' int slen(#varname#) = 0;'}, + ], + 'callfortran':'#varname#,', + 'callfortranappend': {isstringarray: 'slen(#varname#),'}, + 'return': {isintent_out: ',capi_#varname#_as_array'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': ' int capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': ' int capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = 
Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': [ + ' #setdims#;', + ' capi_#varname#_intent |= #intent#;', + (' const char * capi_errmess = "#modulename#.#pyname#:' + ' failed to create array from the #nth# `#varname#`";'), + {isintent_hide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,Py_None,capi_errmess);'}, + {isintent_nothide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,#varname#_capi,capi_errmess);'}, + """\ + if (capi_#varname#_as_array == NULL) { + PyObject* capi_err = PyErr_Occurred(); + if (capi_err == NULL) { + capi_err = #modulename#_error; + PyErr_SetString(capi_err, capi_errmess); + } + } else { + #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_as_array)); +""", + {isstringarray: + ' slen(#varname#) = f2py_itemsize(#varname#);'}, + {hasinitvalue: [ + {isintent_nothide: + ' if (#varname#_capi == Py_None) {'}, + {isintent_hide: ' {'}, + {iscomplexarray: ' #ctype# capi_c;'}, + """\ + int *_i,capi_i=0; + CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); + if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + PyArray_NDIM(capi_#varname#_as_array),1)) { + while ((_i = nextforcomb())) + #varname#[capi_i++] = #init#; /* fortran way */ + } else { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error, + \"Initialization of #nth# #varname# failed (initforcomb).\"); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + f2py_success = 0; + } + } + if (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + ' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + {l_not(l_or(isintent_out, isintent_hide)): """\ + if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { + Py_XDECREF(capi_#varname#_as_array); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """ Py_XDECREF(capi_#varname#_as_array);"""}, + {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Character + { + 'need': 'string', + '_check': ischaracter, + }, + # Character array + { + 'need': 'string', + '_check': ischaracterarray, + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' 
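+ # each rule below brackets frompyobj with a CHECK* macro; cleanupfrompyobj emits the matching closing brace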
+ }, { + 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/', + 'need': 'CHECKSCALAR', + '_check': l_and(isscalar, l_not(iscomplex)), + '_break': '' + }, { + 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/', + 'need': 'CHECKSTRING', + '_check': isstring, + '_break': '' + }, { + 'need': 'CHECKARRAY', + 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/', + '_check': isarray, + '_break': '' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. No need to modify what follows ############# + +#################### Build C/API module ####################### + + +def buildmodule(m, um): + """ + Return + """ + outmess(' Building module "%s"...\n' % (m['name'])) + ret = {} + mod_rules = defmod_rules[:] + vrd = capi_maps.modsign2map(m) + rd = dictappend({'f2py_version': f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb = None + for bi in m['body']: + if bi['block'] not in ['interface', 'abstract interface']: + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name'] == n: + nb = b + break + + if not nb: + print( + 'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n), file=sys.stderr) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. 
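+ # isf90 also decides below whether the generated wrapper is collected into funcwrappers2 (f90 wrapper file) or funcwrappers (f77 wrapper file)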
+ isf90 = requiresf90wrapper(nb) + # options is in scope here + if options['emptygen']: + b_path = options['buildpath'] + m_name = vrd['modulename'] + outmess(' Generating possibly empty wrappers\n') + Path(f"{b_path}/{vrd['coutput']}").touch() + if isf90: + # f77 + f90 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers2.f90"\n') + Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch() + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + else: + # only f77 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + api, wrap = buildapi(nb) + if wrap: + if isf90: + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar = applyrules(api, vrd) + rd = dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar = applyrules(cr, vrd) + rd = dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar = applyrules(mr, vrd) + rd = dictappend(rd, ar) + + for u in um: + ar = use_rules.buildusevars(u, m['use'][u['name']]) + rd = dictappend(rd, ar) + + needs = cfuncs.get_needs() + # Add mapped definitions + needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # + if cvar in typedef_need_dict.values()] + code = {} + for n in needs.keys(): + code[n] = [] + for k in needs[n]: + c = '' + if k in cfuncs.includes0: + c = cfuncs.includes0[k] + elif k in cfuncs.includes: + c = cfuncs.includes[k] + elif k in cfuncs.userincludes: + c = cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c = cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c = cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c = cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c = cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c = cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c = cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c = cfuncs.commonhooks[k] + else: + errmess('buildmodule: unknown need %s.\n' % (repr(k))) + continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar = applyrules(r, vrd, m) + rd = dictappend(rd, ar) + ar = applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + with open(fn, 'w') as f: + f.write(ar['modulebody'].replace('\t', 2 * ' ')) + outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + + if options['dorestdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.rest') + with open(fn, 'w') as f: + f.write('.. 
-*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % + (options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.tex') + ret['ltx'] = fn + with open(fn, 'w') as f: + f.write( + '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) + if 'shortlatex' not in options: + f.write( + '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % + (options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('C -*- fortran -*-\n') + f.write( + 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + 'C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): + if 0 <= l.find('!') < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': + while len(l) >= 66: + lines.append(l[:66] + '\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn)) + if funcwrappers2: + wn = os.path.join( + options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('! -*- f90 -*-\n') + f.write( + '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + '! 
It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): + if 0 <= l.find('!') < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': + lines.append(l[:72] + '&\n &') + l = l[72:] + while len(l) > 66: + lines.append(l[:66] + '&\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn)) + return ret + +################## Build C/API function ############# + +stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', + 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} + + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs = getargs2(rout) + capi_maps.depargs = depargs + var = rout['vars'] + + if ismoduleroutine(rout): + outmess(' Constructing wrapper function "%s.%s"...\n' % + (rout['modulename'], rout['name'])) + else: + outmess(' Constructing wrapper function "%s"...\n' % (rout['name'])) + # Routine + vrd = capi_maps.routsign2map(rout) + rd = dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + + # Args + nth, nthk = 0, 0 + savevrd = {} + for a in args: + vrd = capi_maps.sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth = nth + 1 + vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' + else: + nthk = nthk + 1 + vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' + else: + vrd['nth'] = 'hidden' + savevrd[a] = vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd = savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check'] = c + ar = applyrules(check_rules, vrd, var[a]) + rd = dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign': rd['docsign'], + 'docsignopt': rd['docsignopt'], + 'docsignxa': rd['docsignxa']})) + optargs = stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa': rd['docsignxashort'], + 'docsignopt': rd['docsignoptshort']} + )) + if optargs == '': + rd['docsignatureshort'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort'] = rd[ + 'latexdocsignatureshort'].replace(',', ', ') + cfs = stripcomma(replace('#callfortran##callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + if len(rd['callfortranappend']) > 1: + rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 
0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess(' %s\n' % (ar['docshort'])) + else: + outmess(' %s\n' % (ar['docshort'])) + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/phivenv/Lib/site-packages/numpy/f2py/setup.cfg b/phivenv/Lib/site-packages/numpy/f2py/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..fd3c6f4895e598adbd2c4c875052f464066a5742 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/setup.cfg @@ -0,0 +1,3 @@ +[bdist_rpm] +doc_files = docs/ + tests/ \ No newline at end of file diff --git a/phivenv/Lib/site-packages/numpy/f2py/src/fortranobject.c b/phivenv/Lib/site-packages/numpy/f2py/src/fortranobject.c new file mode 100644 index 0000000000000000000000000000000000000000..516f7f3641914701d87d8101b5f31d0891f2d7a4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/src/fortranobject.c @@ -0,0 +1,1423 @@ +#define FORTRANOBJECT_C +#include "fortranobject.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include <stdarg.h> +#include <stdlib.h> +#include <string.h> + +/* + This file implements: FortranObject, array_from_pyobj, copy_ND_array + + Author: Pearu Peterson <pearu@cens.ioc.ee> + $Revision: 1.52 $ + $Date: 2005/07/11 07:44:20 $ +*/ + +int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) +{ + if (obj == NULL) { + fprintf(stderr, "Error loading %s\n", name); + if (PyErr_Occurred()) { + PyErr_Print(); + PyErr_Clear(); + } + return -1; + } + return PyDict_SetItemString(dict, name, obj); +} + +/* + * Python-only fallback for thread-local callback pointers + */ +void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) +{ + PyObject *local_dict, *value; + void *prev; + + local_dict = PyThreadState_GetDict(); + if (local_dict == NULL) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict " + "failed"); + } + + value = PyDict_GetItemString(local_dict, key); + if (value != NULL) { + prev = PyLong_AsVoidPtr(value); + if (PyErr_Occurred()) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + } + } + else { + prev = NULL; + } + + value = PyLong_FromVoidPtr((void *)ptr); + if (value == NULL) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed"); + } + + if (PyDict_SetItemString(local_dict, key, value) != 0) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed"); + } + + Py_DECREF(value); + + return prev; +} + +void * +F2PyGetThreadLocalCallbackPtr(char *key) +{ + PyObject *local_dict, *value; + void *prev; + + local_dict = PyThreadState_GetDict(); + if (local_dict == NULL) { + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); + } + + value = PyDict_GetItemString(local_dict, key); + if (value != NULL) { + prev = PyLong_AsVoidPtr(value); + if (PyErr_Occurred()) { + 
Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + } + } + else { + prev = NULL; + } + + return prev; +} + +static PyArray_Descr * +get_descr_from_type_and_elsize(const int type_num, const int elsize) { + PyArray_Descr * descr = PyArray_DescrFromType(type_num); + if (type_num == NPY_STRING) { + // PyArray_DescrFromType returns descr with elsize = 0. + PyArray_DESCR_REPLACE(descr); + if (descr == NULL) { + return NULL; + } + PyDataType_SET_ELSIZE(descr, elsize); + } + return descr; +} + +/************************* FortranObject *******************************/ + +typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *); + +PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init) +{ + int i; + PyFortranObject *fp = NULL; + PyObject *v = NULL; + if (init != NULL) { /* Initialize F90 module objects */ + (*(init))(); + } + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) { + return NULL; + } + if ((fp->dict = PyDict_New()) == NULL) { + Py_DECREF(fp); + return NULL; + } + fp->len = 0; + while (defs[fp->len].name != NULL) { + fp->len++; + } + if (fp->len == 0) { + goto fail; + } + fp->defs = defs; + for (i = 0; i < fp->len; i++) { + if (fp->defs[i].rank == -1) { /* Is Fortran routine */ + v = PyFortranObject_NewAsAttr(&(fp->defs[i])); + if (v == NULL) { + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + else if ((fp->defs[i].data) != + NULL) { /* Is Fortran variable or array (not allocatable) */ + PyArray_Descr * + descr = get_descr_from_type_and_elsize(fp->defs[i].type, + fp->defs[i].elsize); + if (descr == NULL) { + goto fail; + } + v = PyArray_NewFromDescr(&PyArray_Type, descr, fp->defs[i].rank, + fp->defs[i].dims.d, NULL, fp->defs[i].data, + NPY_ARRAY_FARRAY, NULL); + if (v == NULL) { + Py_DECREF(descr); + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + } + return (PyObject *)fp; +fail: + Py_XDECREF(fp); + return NULL; +} + +PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs) +{ /* used for calling F90 module routines */ + PyFortranObject *fp = NULL; + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) + return NULL; + if ((fp->dict = PyDict_New()) == NULL) { + PyObject_Del(fp); + return NULL; + } + fp->len = 1; + fp->defs = defs; + if (defs->rank == -1) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + } else if (defs->rank == 0) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + } else { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + } + return (PyObject *)fp; +} + +/* Fortran methods */ + +static void +fortran_dealloc(PyFortranObject *fp) +{ + Py_XDECREF(fp->dict); + PyObject_Del(fp); +} + +/* Returns number of bytes consumed from buf, or -1 on error. 
*/ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i; + npy_intp n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } + + if (size <= 0) { + return -1; + } + + *p++ = ')'; + size--; + + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if ((size_t)size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + p += sizeof(notalloc); + size -= sizeof(notalloc); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; + PyObject *s = NULL; + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { + size += strlen(def.doc); + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { + goto fail; + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + } + } + else { + PyArray_Descr *d = PyArray_DescrFromType(def.type); + n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type); + Py_DECREF(d); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; + } + } + if (size <= 1) { + goto fail; + } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ + s = PyUnicode_FromStringAndSize(buf, p - buf); + + PyMem_Free(buf); + return s; + +fail: + fprintf(stderr, + "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; +} + +static FortranDataDef *save_def; /* save pointer of an allocatable array */ +static void +set_data(char *d, npy_intp *f) +{ /* callback from Fortran */ + if (*f) /* In fortran f=allocated(d) */ + save_def->data = d; + else + save_def->data = NULL; + /* printf("set_data: d=%p,f=%d\n",d,*f); */ +} + +static PyObject * +fortran_getattr(PyFortranObject *fp, char *name) +{ + int i, j, k, flag; + if (fp->dict != NULL) { + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + if (v == NULL && PyErr_Occurred()) { + return NULL; + } + else if (v != NULL) { + Py_INCREF(v); + return v; + } + } + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) + if (fp->defs[i].rank != -1) { /* F90 allocatable array */ + if (fp->defs[i].func == NULL) + return NULL; + for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1; + save_def = &fp->defs[i]; + (*(fp->defs[i].func))(&fp->defs[i].rank, fp->defs[i].dims.d, + set_data, &flag); + if (flag == 2) + k = fp->defs[i].rank + 1; + else + k = fp->defs[i].rank; + if (fp->defs[i].data != NULL) { /* array is allocated */ + PyObject *v = PyArray_New( + &PyArray_Type, k, fp->defs[i].dims.d, 
fp->defs[i].type, + NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + if (v == NULL) + return NULL; + /* Py_INCREF(v); */ + return v; + } + else { /* array is not allocated */ + Py_RETURN_NONE; + } + } + if (strcmp(name, "__dict__") == 0) { + Py_INCREF(fp->dict); + return fp->dict; + } + if (strcmp(name, "__doc__") == 0) { + PyObject *s = PyUnicode_FromString(""), *s2, *s3; + for (i = 0; i < fp->len; i++) { + s2 = fortran_doc(fp->defs[i]); + s3 = PyUnicode_Concat(s, s2); + Py_DECREF(s2); + Py_DECREF(s); + s = s3; + } + if (PyDict_SetItemString(fp->dict, name, s)) + return NULL; + return s; + } + if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) { + PyObject *cobj = + F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL); + if (PyDict_SetItemString(fp->dict, name, cobj)) + return NULL; + return cobj; + } + PyObject *str, *ret; + str = PyUnicode_FromString(name); + ret = PyObject_GenericGetAttr((PyObject *)fp, str); + Py_DECREF(str); + return ret; +} + +static int +fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) +{ + int i, j, flag; + PyArrayObject *arr = NULL; + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) { + if (fp->defs[i].rank == -1) { + PyErr_SetString(PyExc_AttributeError, + "over-writing fortran routine"); + return -1; + } + if (fp->defs[i].func != NULL) { /* is allocatable array */ + npy_intp dims[F2PY_MAX_DIMS]; + int k; + save_def = &fp->defs[i]; + if (v != Py_None) { /* set new value (reallocate if needed -- + see f2py generated code for more + details ) */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + if ((arr = array_from_pyobj(fp->defs[i].type, dims, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr), + set_data, &flag); + } + else { /* deallocate */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0; + (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data, + &flag); + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + } + memcpy(fp->defs[i].dims.d, dims, + fp->defs[i].rank * sizeof(npy_intp)); + } + else { /* not allocatable array */ + if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + } + if (fp->defs[i].data != + NULL) { /* copy Python object to Fortran array */ + npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d, + PyArray_NDIM(arr)); + if (s == -1) + s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr)); + if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr), + s * PyArray_ITEMSIZE(arr))) == NULL) { + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + return -1; + } + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + } + else + return (fp->defs[i].func == NULL ? 
-1 : 0); + return 0; /* successful */ + } + if (fp->dict == NULL) { + fp->dict = PyDict_New(); + if (fp->dict == NULL) + return -1; + } + if (v == NULL) { + int rv = PyDict_DelItemString(fp->dict, name); + if (rv < 0) + PyErr_SetString(PyExc_AttributeError, + "delete non-existing fortran attribute"); + return rv; + } + else + return PyDict_SetItemString(fp->dict, name, v); +} + +static PyObject * +fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) +{ + int i = 0; + /* printf("fortran call + name=%s,func=%p,data=%p,%p\n",fp->defs[i].name, + fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */ + if (fp->defs[i].rank == -1) { /* is Fortran routine */ + if (fp->defs[i].func == NULL) { + PyErr_Format(PyExc_RuntimeError, "no function to call"); + return NULL; + } + else if (fp->defs[i].data == NULL) + /* dummy routine */ + return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg, + kw, NULL); + else + return (*((fortranfunc)(fp->defs[i].func)))( + (PyObject *)fp, arg, kw, (void *)fp->defs[i].data); + } + PyErr_Format(PyExc_TypeError, "this fortran object is not callable"); + return NULL; +} + +static PyObject * +fortran_repr(PyFortranObject *fp) +{ + PyObject *name = NULL, *repr = NULL; + name = PyObject_GetAttrString((PyObject *)fp, "__name__"); + PyErr_Clear(); + if (name != NULL && PyUnicode_Check(name)) { + repr = PyUnicode_FromFormat("<fortran %U>", name); + } + else { + repr = PyUnicode_FromString("<fortran object>"); + } + Py_XDECREF(name); + return repr; +} + +PyTypeObject PyFortran_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran", + .tp_basicsize = sizeof(PyFortranObject), + .tp_dealloc = (destructor)fortran_dealloc, + .tp_getattr = (getattrfunc)fortran_getattr, + .tp_setattr = (setattrfunc)fortran_setattr, + .tp_repr = (reprfunc)fortran_repr, + .tp_call = (ternaryfunc)fortran_call, +}; + +/************************* f2py_report_atexit *******************************/ + +#ifdef F2PY_REPORT_ATEXIT +static int passed_time = 0; +static int passed_counter = 0; +static int passed_call_time = 0; +static struct timeb start_time; +static struct timeb stop_time; +static struct timeb start_call_time; +static struct timeb stop_call_time; +static int cb_passed_time = 0; +static int cb_passed_counter = 0; +static int cb_passed_call_time = 0; +static struct timeb cb_start_time; +static struct timeb cb_stop_time; +static struct timeb cb_start_call_time; +static struct timeb cb_stop_call_time; + +extern void +f2py_start_clock(void) +{ + ftime(&start_time); +} +extern void +f2py_start_call_clock(void) +{ + f2py_stop_clock(); + ftime(&start_call_time); +} +extern void +f2py_stop_clock(void) +{ + ftime(&stop_time); + passed_time += 1000 * (stop_time.time - start_time.time); + passed_time += stop_time.millitm - start_time.millitm; +} +extern void +f2py_stop_call_clock(void) +{ + ftime(&stop_call_time); + passed_call_time += 1000 * (stop_call_time.time - start_call_time.time); + passed_call_time += stop_call_time.millitm - start_call_time.millitm; + passed_counter += 1; + f2py_start_clock(); +} + +extern void +f2py_cb_start_clock(void) +{ + ftime(&cb_start_time); +} +extern void +f2py_cb_start_call_clock(void) +{ + f2py_cb_stop_clock(); + ftime(&cb_start_call_time); +} +extern void +f2py_cb_stop_clock(void) +{ + ftime(&cb_stop_time); + cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time); + cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm; +} +extern void +f2py_cb_stop_call_clock(void) +{ + ftime(&cb_stop_call_time); + cb_passed_call_time += + 1000 * (cb_stop_call_time.time
- cb_start_call_time.time); + cb_passed_call_time += + cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_counter += 1; + f2py_cb_start_clock(); +} + +static int f2py_report_on_exit_been_here = 0; +extern void +f2py_report_on_exit(int exit_flag, void *name) +{ + if (f2py_report_on_exit_been_here) { + fprintf(stderr, " %s\n", (char *)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr, " /-----------------------\\\n"); + fprintf(stderr, " < F2PY performance report >\n"); + fprintf(stderr, " \\-----------------------/\n"); + fprintf(stderr, "Overall time spent in ...\n"); + fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n", + passed_counter, passed_time); + fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter, cb_passed_time); + + fprintf(stderr, + "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time - cb_passed_call_time - cb_passed_time); + fprintf(stderr, + "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr, "Exit status: %d\n", exit_flag); + fprintf(stderr, "Modules : %s\n", (char *)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void +f2py_report_on_array_copy(PyArrayObject *arr) +{ + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr, + "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void +f2py_report_on_array_copy_fromany(void) +{ + fprintf(stderr, "created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \ + f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. 
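The copy/no-copy behaviour this file description refers to is observable from plain NumPy: a C-ordered 2-D array is not Fortran-contiguous, so handing it to a Fortran argument forces a one-time column-major copy. A small demonstration (NumPy only; no compiled f2py module required):

import numpy as np

a = np.arange(6.0).reshape(2, 3)            # C order
print(a.flags['F_CONTIGUOUS'])              # False
f = np.asfortranarray(a)                    # the kind of copy f2py would make
print(f.flags['F_CONTIGUOUS'])              # True
print(np.shares_memory(a, f))               # False -- data was copied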
+ * + * Author: Pearu Peterson + * Created: 13-16 January 2002 + * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ + */ + +static int check_and_fix_dimensions(const PyArrayObject* arr, + const int rank, + npy_intp *dims, + const char *errmess); + +static int +find_first_negative_dimension(const int rank, const npy_intp *dims) +{ + int i; + for (i = 0; i < rank; ++i) { + if (dims[i] < 0) { + return i; + } + } + return -1; +} + +#ifdef DEBUG_COPY_ND_ARRAY +void +dump_dims(int rank, npy_intp const *dims) +{ + int i; + printf("["); + for (i = 0; i < rank; ++i) { + printf("%3" NPY_INTP_FMT, dims[i]); + } + printf("]\n"); +} +void +dump_attrs(const PyArrayObject *obj) +{ + const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj; + int rank = PyArray_NDIM(arr); + npy_intp size = PyArray_Size((PyObject *)arr); + printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank, + arr->flags, size); + printf("\tstrides = "); + dump_dims(rank, arr->strides); + printf("\tdimensions = "); + dump_dims(rank, arr->dimensions); +} +#endif + +#define SWAPTYPE(a, b, t) \ + { \ + t c; \ + c = (a); \ + (a) = (b); \ + (b) = c; \ + } + +static int +swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) +{ + PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, + *arr2 = (PyArrayObject_fields *)obj2; + SWAPTYPE(arr1->data, arr2->data, char *); + SWAPTYPE(arr1->nd, arr2->nd, int); + SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); + SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); + SWAPTYPE(arr1->base, arr2->base, PyObject *); + SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); + SWAPTYPE(arr1->flags, arr2->flags, int); + /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ + return 0; +} + +#define ARRAY_ISCOMPATIBLE(arr,type_num) \ + ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ + (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ + (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) || \ + (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) || \ + (PyArray_ISSTRING(arr) && PyTypeNum_ISSTRING(type_num))) + +static int +get_elsize(PyObject *obj) { + /* + get_elsize determines array itemsize from a Python object. Returns + elsize if successful, -1 otherwise. + + Supported types of the input are: numpy.ndarray, bytes, str, tuple, + list. + */ + + if (PyArray_Check(obj)) { + return PyArray_ITEMSIZE((PyArrayObject *)obj); + } else if (PyBytes_Check(obj)) { + return PyBytes_GET_SIZE(obj); + } else if (PyUnicode_Check(obj)) { + return PyUnicode_GET_LENGTH(obj); + } else if (PySequence_Check(obj)) { + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + if (fast != NULL) { + Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); + int sz, elsize = 0; + for (i=0; i<n; i++) { + sz = get_elsize(PySequence_Fast_GET_ITEM(fast, i) /* borrowed */); + if (sz > elsize) { + elsize = sz; + } + } + Py_DECREF(fast); + return elsize; + } + } + return -1; +} + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, + const int elsize_, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj, + const char *errmess) { + /* + * Return an array with given element type and shape from a Python + * object while taking into account the usage intent of the array. + * + * - element type is defined by type_num and elsize + * - shape is defined by dims and rank + * + * ndarray_from_pyobj is used to convert Python object arguments + * to numpy ndarrays with given type and shape that data is passed + * to interfaced Fortran or C functions.
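As a cross-check on get_elsize above, its element-size rules are easy to restate in Python: an ndarray contributes its itemsize, bytes and str their length, and a sequence the maximum over its items. A hedged model (our function, not the C one; compare also the character table in the comment below):

import numpy as np

def get_elsize(obj):
    if isinstance(obj, np.ndarray):
        return obj.dtype.itemsize
    if isinstance(obj, (bytes, str)):
        return len(obj)
    if isinstance(obj, (tuple, list)):
        return max((get_elsize(x) for x in obj), default=0)
    return -1                                # unsupported type

print(get_elsize(b"hello"))                  # 5
print(get_elsize(["ab", "abcd"]))            # 4
print(np.dtype('S5').itemsize)               # 5 -- cf. character*5
print(np.dtype('S1').itemsize)               # 1 -- cf. character(5)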
+ * + * errmess (if not NULL), contains a prefix of an error message + * for an exception to be triggered within this function. + * + * Negative elsize value means that elsize is to be determined + * from the Python object in runtime. + * + * Note on strings + * --------------- + * + * String type (type_num == NPY_STRING) does not have fixed + * element size and, by default, the type object sets it to + * 0. Therefore, for string types, one has to use elsize + * argument. For other types, elsize value is ignored. + * + * NumPy defines the type of a fixed-width string as + * dtype('S'). In addition, there is also dtype('c'), that + * appears as dtype('S1') (these have the same type_num value), + * but is actually different (.char attribute is either 'S' or + * 'c', respecitely). + * + * In Fortran, character arrays and strings are different + * concepts. The relation between Fortran types, NumPy dtypes, + * and type_num-elsize pairs, is defined as follows: + * + * character*5 foo | dtype('S5') | elsize=5, shape=() + * character(5) foo | dtype('S1') | elsize=1, shape=(5) + * character*5 foo(n) | dtype('S5') | elsize=5, shape=(n,) + * character(5) foo(n) | dtype('S1') | elsize=1, shape=(5, n) + * character*(*) foo | dtype('S') | elsize=-1, shape=() + * + * Note about reference counting + * ----------------------------- + * + * If the caller returns the array to Python, it must be done with + * Py_BuildValue("N",arr). Otherwise, if obj!=arr then the caller + * must call Py_DECREF(arr). + * + * Note on intent(cache,out,..) + * ---------------------------- + * Don't expect correct data when returning intent(cache) array. + * + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyArrayObject *arr = NULL; + int elsize = (elsize_ < 0 ? get_elsize(obj) : elsize_); + if (elsize < 0) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- failed to determine element size from %s", + Py_TYPE(obj)->tp_name); + PyErr_SetString(PyExc_SystemError, mess); + return NULL; + } + PyArray_Descr * descr = get_descr_from_type_and_elsize(type_num, elsize); // new reference + if (descr == NULL) { + return NULL; + } + elsize = PyDataType_ELSIZE(descr); + if ((intent & F2PY_INTENT_HIDE) + || ((intent & F2PY_INTENT_CACHE) && (obj == Py_None)) + || ((intent & F2PY_OPTIONAL) && (obj == Py_None)) + ) { + /* intent(cache), optional, intent(hide) */ + int ineg = find_first_negative_dimension(rank, dims); + if (ineg >= 0) { + int i; + strcpy(mess, "failed to create intent(cache|hide)|optional array" + "-- must have defined dimensions but got ("); + for(i = 0; i < rank; ++i) + sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]); + strcat(mess, ")"); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + arr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, rank, dims, + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (PyArray_ITEMSIZE(arr) != elsize) { + strcpy(mess, "failed to create intent(cache|hide)|optional array"); + sprintf(mess+strlen(mess)," -- expected elsize=%d got %" NPY_INTP_FMT, elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError,mess); + Py_DECREF(arr); + return NULL; + } + if (!(intent & F2PY_INTENT_CACHE)) { + PyArray_FILLWBYTE(arr, 0); + } + return arr; + } + + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)obj; + if (intent & F2PY_INTENT_CACHE) { + /* intent(cache) */ + if (PyArray_ISONESEGMENT(arr) + && PyArray_ITEMSIZE(arr) >= 
elsize) { + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + Py_DECREF(descr); + return arr; + } + strcpy(mess, "failed to initialize intent(cache) array"); + if (!PyArray_ISONESEGMENT(arr)) + strcat(mess, " -- input must be in one segment"); + if (PyArray_ITEMSIZE(arr) < elsize) + sprintf(mess + strlen(mess), + " -- expected at least elsize=%d but got " + "%" NPY_INTP_FMT, + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inout) or intent(inplace) + */ + + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + /* + printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); + printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); + int i; + for (i=1;i<=16;i++) + printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); + */ + if ((! (intent & F2PY_INTENT_COPY)) && + PyArray_ITEMSIZE(arr) == elsize && + ARRAY_ISCOMPATIBLE(arr,type_num) && + F2PY_CHECK_ALIGNMENT(arr, intent)) { + if ((intent & F2PY_INTENT_INOUT || intent & F2PY_INTENT_INPLACE) + ? ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY(arr) : PyArray_ISFARRAY(arr)) + : ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) : PyArray_ISFARRAY_RO(arr))) { + if ((intent & F2PY_INTENT_OUT)) { + Py_INCREF(arr); + } + /* Returning input array */ + Py_DECREF(descr); + return arr; + } + } + if (intent & F2PY_INTENT_INOUT) { + strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires + * writable input */ + if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) + strcat(mess, " -- input not contiguous"); + if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) + strcat(mess, " -- input not fortran contiguous"); + if (PyArray_ITEMSIZE(arr) != elsize) + sprintf(mess + strlen(mess), + " -- expected elsize=%d but got %" NPY_INTP_FMT, + elsize, + (npy_intp)PyArray_ITEMSIZE(arr) + ); + if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) { + sprintf(mess + strlen(mess), + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + } + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess + strlen(mess), " -- input not %d-aligned", + F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inplace) */ + + { + PyArrayObject * retarr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (retarr==NULL) { + Py_DECREF(descr); + return NULL; + } + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + if (PyArray_CopyInto(retarr, arr)) { + Py_DECREF(retarr); + return NULL; + } + if (intent & F2PY_INTENT_INPLACE) { + if (swap_arrays(arr,retarr)) { + Py_DECREF(retarr); + return NULL; /* XXX: set exception */ + } + Py_XDECREF(retarr); + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + } else { + arr = retarr; + } + } + return arr; + } + + if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { + PyErr_Format(PyExc_TypeError, + "failed to initialize intent(inout|inplace|cache) " + "array, input '%s' object is not an array", + Py_TYPE(obj)->tp_name); + Py_DECREF(descr); + return NULL; + } + + { + F2PY_REPORT_ON_ARRAY_COPY_FROMANY; + arr = (PyArrayObject *)PyArray_FromAny( + 
obj, descr, 0, 0, + ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY + : NPY_ARRAY_FARRAY) | + NPY_ARRAY_FORCECAST, + NULL); + // Warning: in the case of NPY_STRING, PyArray_FromAny may + // reset descr->elsize, e.g. dtype('S0') becomes dtype('S1'). + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (type_num != NPY_STRING && PyArray_ITEMSIZE(arr) != elsize) { + // This is internal sanity tests: elsize has been set to + // descr->elsize in the beginning of this function. + strcpy(mess, "failed to initialize intent(in) array"); + sprintf(mess + strlen(mess), + " -- expected elsize=%d got %" NPY_INTP_FMT, elsize, + (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(arr); + return NULL; + } + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(arr); + return NULL; + } + return arr; + } +} + +extern PyArrayObject * +array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj) { + /* + Same as ndarray_from_pyobj but with elsize determined from type, + if possible. Provided for backward compatibility. + */ + PyArray_Descr* descr = PyArray_DescrFromType(type_num); + int elsize = PyDataType_ELSIZE(descr); + Py_DECREF(descr); + return ndarray_from_pyobj(type_num, elsize, dims, rank, intent, obj, NULL); +} + +/*****************************************/ +/* Helper functions for array_from_pyobj */ +/*****************************************/ + +static int +check_and_fix_dimensions(const PyArrayObject* arr, const int rank, + npy_intp *dims, const char *errmess) +{ + /* + * This function fills in blanks (that are -1's) in dims list using + * the dimensions from arr. It also checks that non-blank dims will + * match with the corresponding values in arr dimensions. + * + * Returns 0 if the function is successful. + * + * If an error condition is detected, an exception is set and 1 is + * returned. + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + const npy_intp arr_size = + (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1; +#ifdef DEBUG_COPY_ND_ARRAY + dump_attrs(arr); + printf("check_and_fix_dimensions:init: dims="); + dump_dims(rank, dims); +#endif + if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ + npy_intp new_size = 1; + int free_axe = -1; + int i; + npy_intp d; + /* Fill dims where -1 or 0; check dimensions; calc new_size; */ + for (i = 0; i < PyArray_NDIM(arr); ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && dims[i] != d) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else { + dims[i] = d ? 
d : 1; + } + new_size *= dims[i]; + } + for (i = PyArray_NDIM(arr); i < rank; ++i) + if (dims[i] > 1) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); + return 1; + } + else if (free_axe < 0) + free_axe = i; + else + dims[i] = 1; + if (free_axe >= 0) { + dims[free_axe] = arr_size / new_size; + new_size *= dims[free_axe]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); + return 1; + } + } + else if (rank == PyArray_NDIM(arr)) { + npy_intp new_size = 1; + int i; + npy_intp d; + for (i = 0; i < rank; ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT, + i, dims[i], d); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + new_size *= dims[i]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); + return 1; + } + } + else { /* [[1,2]] -> [[1],[2]] */ + int i, j; + npy_intp d; + int effrank; + npy_intp size; + for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i) + if (PyArray_DIM(arr, i) > 1) + ++effrank; + if (dims[rank - 1] >= 0) + if (effrank > rank) { + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); + return 1; + } + + for (i = 0, j = 0; i < rank; ++i) { + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + } + + for (i = rank; i < PyArray_NDIM(arr); + ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + dims[rank - 1] *= d; + } + for (i = 0, size = 1; i < rank; ++i) size *= dims[i]; + if (size != arr_size) { + char msg[200]; + int len; + snprintf(msg, sizeof(msg), + "unexpected array size: size=%" NPY_INTP_FMT + ", arr_size=%" NPY_INTP_FMT + ", rank=%d, effrank=%d, arr.nd=%d, dims=[", + size, arr_size, rank, effrank, PyArray_NDIM(arr)); + for (i = 0; i < rank; ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + dims[i]); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=["); + for (i = 0; i < PyArray_NDIM(arr); ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + PyArray_DIM(arr, i)); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ]\n"); + PyErr_SetString(PyExc_ValueError, msg); + return 1; + } + } +#ifdef DEBUG_COPY_ND_ARRAY + printf("check_and_fix_dimensions:end: dims="); + dump_dims(rank, dims); +#endif + return 0; +} + +/* End of file: 
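The dimension-filling contract implemented by check_and_fix_dimensions above can be summarised in a few lines of Python: -1 entries in dims are blanks to be inferred from the array so that the total size matches. A hedged model (fix_dims is our name; the C routine additionally handles rank promotion and flattening):

import numpy as np

def fix_dims(arr, dims):
    dims = list(dims)
    free = [i for i, d in enumerate(dims) if d == -1]
    known = int(np.prod([d for d in dims if d != -1]))
    if len(free) == 1:                       # one free index absorbs the rest
        dims[free[0]] = arr.size // known
    if int(np.prod(dims)) != arr.size:
        raise ValueError("unexpected array size")
    return dims

print(fix_dims(np.arange(12.0), [3, -1]))    # [3, 4]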
array_from_pyobj.c */ + +/************************* copy_ND_array *******************************/ + +extern int +copy_ND_array(const PyArrayObject *arr, PyArrayObject *out) +{ + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + return PyArray_CopyInto(out, (PyArrayObject *)arr); +} + +/********************* Various utility functions ***********************/ + +extern int +f2py_describe(PyObject *obj, char *buf) { + /* + Write the description of a Python object to buf. The caller must + provide buffer with size sufficient to write the description. + + Return 1 on success. + */ + char localbuf[F2PY_MESSAGE_BUFFER_SIZE]; + if (PyBytes_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PyBytes_GET_SIZE(obj), Py_TYPE(obj)->tp_name); + } else if (PyUnicode_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PyUnicode_GET_LENGTH(obj), Py_TYPE(obj)->tp_name); + } else if (PyArray_CheckScalar(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + sprintf(localbuf, "%c%" NPY_INTP_FMT "-%s-scalar", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name); + } else if (PyArray_Check(obj)) { + int i; + PyArrayObject* arr = (PyArrayObject*)obj; + strcpy(localbuf, "("); + for (i=0; i<PyArray_NDIM(arr); i++) { + if (i) { + strcat(localbuf, ","); + } + sprintf(localbuf + strlen(localbuf), "%" NPY_INTP_FMT, PyArray_DIM(arr, i)); + } + strcat(localbuf, ")-"); + sprintf(localbuf + strlen(localbuf), "%c%" NPY_INTP_FMT "-%s", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name); + } else if (PySequence_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PySequence_Length(obj), Py_TYPE(obj)->tp_name); + } else { + sprintf(localbuf, "%s instance", Py_TYPE(obj)->tp_name); + } + // TODO: detect the size of buf and make sure that size(buf) >= size(localbuf). + strcpy(buf, localbuf); + return 1; +} + +extern npy_intp +f2py_size_impl(PyArrayObject* var, ...) +{ + npy_intp sz = 0; + npy_intp dim; + npy_intp rank; + va_list argp; + va_start(argp, var); + dim = va_arg(argp, npy_int); + if (dim==-1) + { + sz = PyArray_SIZE(var); + } + else + { + rank = PyArray_NDIM(var); + if (dim>=1 && dim<=rank) + sz = PyArray_DIM(var, dim-1); + else + fprintf(stderr, "f2py_size: 2nd argument value=%" NPY_INTP_FMT + " fails to satisfy 1<=value<=%" NPY_INTP_FMT + ". Result will be 0.\n", dim, rank); + } + va_end(argp); + return sz; +} + +/*********************************************/ +/* Compatibility functions for Python >= 3.0 */ +/*********************************************/ + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +void * +F2PyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +int +F2PyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif +/************************* EOF fortranobject.c *******************************/ diff --git a/phivenv/Lib/site-packages/numpy/f2py/src/fortranobject.h b/phivenv/Lib/site-packages/numpy/f2py/src/fortranobject.h new file mode 100644 index 0000000000000000000000000000000000000000..31d63588efd0c90d696c4ffebbaf836c9cc87700 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/src/fortranobject.h @@ -0,0 +1,173 @@ +#ifndef Py_FORTRANOBJECT_H +#define Py_FORTRANOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include <Python.h> + +#ifndef NPY_NO_DEPRECATED_API +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif +#ifdef FORTRANOBJECT_C +#define NO_IMPORT_ARRAY +#endif +#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API +#include "numpy/arrayobject.h" +#include "numpy/npy_3kcompat.h" + +#ifdef F2PY_REPORT_ATEXIT +#include <sys/timeb.h> +// clang-format off +extern void f2py_start_clock(void); +extern void f2py_stop_clock(void); +extern void f2py_start_call_clock(void); +extern void f2py_stop_call_clock(void); +extern void f2py_cb_start_clock(void); +extern void f2py_cb_stop_clock(void); +extern void f2py_cb_start_call_clock(void); +extern void f2py_cb_stop_call_clock(void); +extern void f2py_report_on_exit(int, void *); +// clang-format on +#endif + +#ifdef DMALLOC +#include "dmalloc.h" +#endif + +/* Fortran object interface */ + +/* +123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 + +PyFortranObject represents various Fortran objects: +Fortran (module) routines, COMMON blocks, module data. + +Author: Pearu Peterson +*/ + +#define F2PY_MAX_DIMS 40 +#define F2PY_MESSAGE_BUFFER_SIZE 300 // Increase on "stack smashing detected" + +typedef void (*f2py_set_data_func)(char *, npy_intp *); +typedef void (*f2py_void_func)(void); +typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *); + +/*typedef void* (*f2py_c_func)(void*,...);*/ + +typedef void *(*f2pycfunc)(void); + +typedef struct { + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct { + npy_intp d[F2PY_MAX_DIMS]; + } dims; /* dimensions of the array, || not used */ + int type; /* PyArray_<type> || not used */ + int elsize; /* Element size || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines.
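The intent argument threaded through ndarray_from_pyobj is a bitmask of the F2PY_INTENT_* / F2PY_OPTIONAL flags defined just below. A small decoder makes combined masks readable (flag values copied from this header; decode_intent is our helper, not part of f2py):

F2PY_FLAGS = {
    'INTENT_IN': 1, 'INTENT_INOUT': 2, 'INTENT_OUT': 4, 'INTENT_HIDE': 8,
    'INTENT_CACHE': 16, 'INTENT_COPY': 32, 'INTENT_C': 64, 'OPTIONAL': 128,
    'INTENT_INPLACE': 256, 'INTENT_ALIGNED4': 512, 'INTENT_ALIGNED8': 1024,
    'INTENT_ALIGNED16': 2048,
}

def decode_intent(mask):
    # list the flag names whose bit is set in the mask
    return [name for name, bit in F2PY_FLAGS.items() if mask & bit]

print(decode_intent(1 | 4 | 64))   # ['INTENT_IN', 'INTENT_OUT', 'INTENT_C']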
*/ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran")) + +extern PyTypeObject PyFortran_Type; +extern int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj); +extern PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init); +extern PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs); + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * +F2PyCapsule_AsVoidPtr(PyObject *obj); +int +F2PyCapsule_Check(PyObject *ptr); + +extern void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void * +F2PyGetThreadLocalCallbackPtr(char *key); + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) \ + ? 4 \ + : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1))) +#define F2PY_CHECK_ALIGNMENT(arr, intent) \ + ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ + || PyArray_DESCR(arr)->type_num == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, + const int rank, const int intent, PyObject *obj, + const char *errmess); + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj); +extern int +copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY +extern void +dump_attrs(const PyArrayObject *arr); +#endif + + extern int f2py_describe(PyObject *obj, char *buf); + + /* Utility CPP macros and functions that can be used in signature file + expressions. See signature-file.rst for documentation. + */ + +#define f2py_itemsize(var) (PyArray_ITEMSIZE(capi_ ## var ## _as_array)) +#define f2py_size(var, ...) 
f2py_size_impl((PyArrayObject *)(capi_ ## var ## _as_array), ## __VA_ARGS__, -1) +#define f2py_rank(var) var ## _Rank +#define f2py_shape(var,dim) var ## _Dims[dim] +#define f2py_len(var) f2py_shape(var,0) +#define f2py_fshape(var,dim) f2py_shape(var,rank(var)-dim-1) +#define f2py_flen(var) f2py_fshape(var,0) +#define f2py_slen(var) capi_ ## var ## _len + + extern npy_intp f2py_size_impl(PyArrayObject* var, ...); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FORTRANOBJECT_H */ diff --git a/phivenv/Lib/site-packages/numpy/f2py/symbolic.py b/phivenv/Lib/site-packages/numpy/f2py/symbolic.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc17c325d7a33b57c7faaf0c4b010f7837a5c5f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/symbolic.py @@ -0,0 +1,1517 @@ +"""Fortran/C symbolic expressions + +References: +- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" + +# To analyze Fortran expressions to solve dimensions specifications, +# for instances, we implement a minimal symbolic engine for parsing +# expressions into a tree of expression instances. As a first +# instance, we care only about arithmetic expressions involving +# integers and operations like addition (+), subtraction (-), +# multiplication (*), division (Fortran / is Python //, Fortran // is +# concatenate), and exponentiation (**). In addition, .pyf files may +# contain C expressions that support here is implemented as well. +# +# TODO: support logical constants (Op.BOOLEAN) +# TODO: support logical operators (.AND., ...) +# TODO: support defined operators (.MYOP., ...) +# +__all__ = ['Expr'] + + +import re +import warnings +from enum import Enum +from math import gcd + + +class Language(Enum): + """ + Used as Expr.tostring language argument. + """ + Python = 0 + Fortran = 1 + C = 2 + + +class Op(Enum): + """ + Used as Expr op attribute. + """ + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1000 + FACTORS = 2000 + REF = 3000 + DEREF = 3001 + + +class RelOp(Enum): + """ + Used in Op.RELATIONAL expression to specify the function part. + """ + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @classmethod + def fromstring(cls, s, language=Language.C): + if language is Language.Fortran: + return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE, + '.lt.': RelOp.LT, '.le.': RelOp.LE, + '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()] + return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT, + '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s] + + def tostring(self, language=Language.C): + if language is Language.Fortran: + return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.', + RelOp.LT: '.lt.', RelOp.LE: '.le.', + RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self] + return {RelOp.EQ: '==', RelOp.NE: '!=', + RelOp.LT: '<', RelOp.LE: '<=', + RelOp.GT: '>', RelOp.GE: '>='}[self] + + +class ArithOp(Enum): + """ + Used in Op.APPLY expression to specify the function part. + """ + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + + +class OpError(Exception): + pass + + +class Precedence(Enum): + """ + Used as Expr.tostring precedence argument. 
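The RelOp table above fixes the two surface syntaxes, so a quick round-trip shows the intent (assuming this file is importable as numpy.f2py.symbolic, as its path in this diff suggests):

from numpy.f2py.symbolic import RelOp, Language

op = RelOp.fromstring('.le.', language=Language.Fortran)
print(op)                                    # RelOp.LE
print(op.tostring(language=Language.C))      # <=
print(RelOp.fromstring('>=').tostring(language=Language.Fortran))  # .ge.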
+ """ + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + + +integer_types = (int,) +number_types = (int, float) + + +def _pairs_add(d, k, v): + # Internal utility method for updating terms and factors data. + c = d.get(k) + if c is None: + d[k] = v + else: + c = c + v + if c: + d[k] = c + else: + del d[k] + + +class ExprWarning(UserWarning): + pass + + +def ewarn(message): + warnings.warn(message, ExprWarning, stacklevel=2) + + +class Expr: + """Represents a Fortran expression as a op-data pair. + + Expr instances are hashable and sortable. + """ + + @staticmethod + def parse(s, language=Language.C): + """Parse a Fortran expression to a Expr. + """ + return fromstring(s, language=language) + + def __init__(self, op, data): + assert isinstance(op, Op) + + # sanity checks + if op is Op.INTEGER: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], int) + assert isinstance(data[1], (int, str)), data + elif op is Op.REAL: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], float) + assert isinstance(data[1], (int, str)), data + elif op is Op.COMPLEX: + # data is a 2-tuple of constant expressions + assert isinstance(data, tuple) and len(data) == 2 + elif op is Op.STRING: + # data is a 2-tuple of quoted string and a kind value + # (default is 1) + assert isinstance(data, tuple) and len(data) == 2 + assert (isinstance(data[0], str) + and data[0][::len(data[0])-1] in ('""', "''", '@@')) + assert isinstance(data[1], (int, str)), data + elif op is Op.SYMBOL: + # data is any hashable object + assert hash(data) is not None + elif op in (Op.ARRAY, Op.CONCAT): + # data is a tuple of expressions + assert isinstance(data, tuple) + assert all(isinstance(item, Expr) for item in data), data + elif op in (Op.TERMS, Op.FACTORS): + # data is {:} where dict values + # are nonzero Python integers + assert isinstance(data, dict) + elif op is Op.APPLY: + # data is (, , ) where + # operands are Expr instances + assert isinstance(data, tuple) and len(data) == 3 + # function is any hashable object + assert hash(data[0]) is not None + assert isinstance(data[1], tuple) + assert isinstance(data[2], dict) + elif op is Op.INDEXING: + # data is (, ) + assert isinstance(data, tuple) and len(data) == 2 + # function is any hashable object + assert hash(data[0]) is not None + elif op is Op.TERNARY: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + elif op in (Op.REF, Op.DEREF): + # data is Expr instance + assert isinstance(data, Expr) + elif op is Op.RELATIONAL: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + else: + raise NotImplementedError( + f'unknown op or missing sanity check: {op}') + + self.op = op + self.data = data + + def __eq__(self, other): + return (isinstance(other, Expr) + and self.op is other.op + and self.data == other.data) + + def __hash__(self): + if self.op in (Op.TERMS, Op.FACTORS): + data = tuple(sorted(self.data.items())) + elif self.op is Op.APPLY: + data = self.data[:2] + tuple(sorted(self.data[2].items())) + else: + data = self.data + return hash((self.op, data)) + + def __lt__(self, other): + if isinstance(other, Expr): + if self.op is not other.op: + return self.op.value < other.op.value + if self.op in (Op.TERMS, Op.FACTORS): + return 
(tuple(sorted(self.data.items())) + < tuple(sorted(other.data.items()))) + if self.op is Op.APPLY: + if self.data[:2] != other.data[:2]: + return self.data[:2] < other.data[:2] + return tuple(sorted(self.data[2].items())) < tuple( + sorted(other.data[2].items())) + return self.data < other.data + return NotImplemented + + def __le__(self, other): return self == other or self < other + + def __gt__(self, other): return not (self <= other) + + def __ge__(self, other): return not (self < other) + + def __repr__(self): + return f'{type(self).__name__}({self.op}, {self.data!r})' + + def __str__(self): + return self.tostring() + + def tostring(self, parent_precedence=Precedence.NONE, + language=Language.Fortran): + """Return a string representation of Expr. + """ + if self.op in (Op.INTEGER, Op.REAL): + precedence = (Precedence.SUM if self.data[0] < 0 + else Precedence.ATOM) + r = str(self.data[0]) + (f'_{self.data[1]}' + if self.data[1] != 4 else '') + elif self.op is Op.COMPLEX: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '(' + r + ')' + precedence = Precedence.ATOM + elif self.op is Op.SYMBOL: + precedence = Precedence.ATOM + r = str(self.data) + elif self.op is Op.STRING: + r = self.data[0] + if self.data[1] != 1: + r = self.data[1] + '_' + r + precedence = Precedence.ATOM + elif self.op is Op.ARRAY: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '[' + r + ']' + precedence = Precedence.ATOM + elif self.op is Op.TERMS: + terms = [] + for term, coeff in sorted(self.data.items()): + if coeff < 0: + op = ' - ' + coeff = -coeff + else: + op = ' + ' + if coeff == 1: + term = term.tostring(Precedence.SUM, language=language) + else: + if term == as_number(1): + term = str(coeff) + else: + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) + if terms: + terms.append(op) + elif op == ' - ': + terms.append('-') + terms.append(term) + r = ''.join(terms) or '0' + precedence = Precedence.SUM if terms else Precedence.ATOM + elif self.op is Op.FACTORS: + factors = [] + tail = [] + for base, exp in sorted(self.data.items()): + op = ' * ' + if exp == 1: + factor = base.tostring(Precedence.PRODUCT, + language=language) + elif language is Language.C: + if exp in range(2, 10): + factor = base.tostring(Precedence.PRODUCT, + language=language) + factor = ' * '.join([factor] * exp) + elif exp in range(-10, 0): + factor = base.tostring(Precedence.PRODUCT, + language=language) + tail += [factor] * -exp + continue + else: + factor = base.tostring(Precedence.TUPLE, + language=language) + factor = f'pow({factor}, {exp})' + else: + factor = base.tostring(Precedence.POWER, + language=language) + f' ** {exp}' + if factors: + factors.append(op) + factors.append(factor) + if tail: + if not factors: + factors += ['1'] + factors += ['/', '(', ' * '.join(tail), ')'] + r = ''.join(factors) or '1' + precedence = Precedence.PRODUCT if factors else Precedence.ATOM + elif self.op is Op.APPLY: + name, args, kwargs = self.data + if name is ArithOp.DIV and language is Language.C: + numer, denom = [arg.tostring(Precedence.PRODUCT, + language=language) + for arg in args] + r = f'{numer} / {denom}' + precedence = Precedence.PRODUCT + else: + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in args] + args += [k + '=' + v.tostring(Precedence.NONE) + for k, v in kwargs.items()] + r = f'{name}({", ".join(args)})' + precedence = Precedence.ATOM + elif self.op is Op.INDEXING: + name = 
self.data[0] + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in self.data[1:]] + r = f'{name}[{", ".join(args)}]' + precedence = Precedence.ATOM + elif self.op is Op.CONCAT: + args = [arg.tostring(Precedence.PRODUCT, language=language) + for arg in self.data] + r = " // ".join(args) + precedence = Precedence.PRODUCT + elif self.op is Op.TERNARY: + cond, expr1, expr2 = [a.tostring(Precedence.TUPLE, + language=language) + for a in self.data] + if language is Language.C: + r = f'({cond}?{expr1}:{expr2})' + elif language is Language.Python: + r = f'({expr1} if {cond} else {expr2})' + elif language is Language.Fortran: + r = f'merge({expr1}, {expr2}, {cond})' + else: + raise NotImplementedError( + f'tostring for {self.op} and {language}') + precedence = Precedence.ATOM + elif self.op is Op.REF: + r = '&' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.DEREF: + r = '*' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE) + else Precedence.LT) + left = left.tostring(precedence, language=language) + right = right.tostring(precedence, language=language) + rop = rop.tostring(language=language) + r = f'{left} {rop} {right}' + else: + raise NotImplementedError(f'tostring for op {self.op}') + if parent_precedence.value < precedence.value: + # If parent precedence is higher than operand precedence, + # operand will be enclosed in parenthesis. + return '(' + r + ')' + return r + + def __pos__(self): + return self + + def __neg__(self): + return self * -1 + + def __add__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number( + self.data[0] + other.data[0], + max(self.data[1], other.data[1])) + if self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 + r2, i1 + i2) + if self.op is Op.TERMS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self + as_complex(other) + elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX: + return as_complex(self) + other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self + as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) + other + return as_terms(self) + as_terms(other) + return NotImplemented + + def __radd__(self, other): + if isinstance(other, number_types): + return as_number(other) + self + return NotImplemented + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + if isinstance(other, number_types): + return as_number(other) - self + return NotImplemented + + def __mul__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number(self.data[0] * other.data[0], + max(self.data[1], other.data[1])) + elif self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1) + + if self.op is Op.FACTORS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + elif self.op is Op.TERMS: + r = Expr(self.op, 
{}) + for t1, c1 in self.data.items(): + for t2, c2 in other.data.items(): + _pairs_add(r.data, t1 * t2, c1 * c2) + return normalize(r) + + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self * as_complex(other) + elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL): + return as_complex(self) * other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self * as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) * other + + if self.op is Op.TERMS: + return self * as_terms(other) + elif other.op is Op.TERMS: + return as_terms(self) * other + + return as_factors(self) * as_factors(other) + return NotImplemented + + def __rmul__(self, other): + if isinstance(other, number_types): + return as_number(other) * self + return NotImplemented + + def __pow__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if other.op is Op.INTEGER: + exponent = other.data[0] + # TODO: other kind not used + if exponent == 0: + return as_number(1) + if exponent == 1: + return self + if exponent > 0: + if self.op is Op.FACTORS: + r = Expr(self.op, {}) + for k, v in self.data.items(): + r.data[k] = v * exponent + return normalize(r) + return self * (self ** (exponent - 1)) + elif exponent != -1: + return (self ** (-exponent)) ** -1 + return Expr(Op.FACTORS, {self: exponent}) + return as_apply(ArithOp.POW, self, other) + return NotImplemented + + def __truediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran / is different from Python /: + # - `/` is a truncate operation for integer operands + return normalize(as_apply(ArithOp.DIV, self, other)) + return NotImplemented + + def __rtruediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other / self + return NotImplemented + + def __floordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran // is different from Python //: + # - `//` is a concatenate operation for string operands + return normalize(Expr(Op.CONCAT, (self, other))) + return NotImplemented + + def __rfloordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other // self + return NotImplemented + + def __call__(self, *args, **kwargs): + # In Fortran, parenthesis () are use for both function call as + # well as indexing operations. + # + # TODO: implement a method for deciding when __call__ should + # return an INDEXING expression. + return as_apply(self, *map(as_expr, args), + **dict((k, as_expr(v)) for k, v in kwargs.items())) + + def __getitem__(self, index): + # Provided to support C indexing operations that .pyf files + # may contain. + index = as_expr(index) + if not isinstance(index, tuple): + index = index, + if len(index) > 1: + ewarn(f'C-index should be a single expression but got `{index}`') + return Expr(Op.INDEXING, (self,) + index) + + def substitute(self, symbols_map): + """Recursively substitute symbols with values in symbols map. + + Symbols map is a dictionary of symbol-expression pairs. 
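Note the deliberately Fortran-flavoured operator mapping above: Expr / Expr builds an ArithOp.DIV application (Fortran division) and Expr // Expr builds an Op.CONCAT node (Fortran string concatenation), not Python floor division. A short sketch (assuming the module is importable as numpy.f2py.symbolic):

from numpy.f2py.symbolic import Language, as_string, as_symbol

x, y = as_symbol('x'), as_symbol('y')
print((x / y).tostring(language=Language.C))    # x / y
s = as_string('"ab"') // as_string('"cd"')      # Fortran // is concatenation
print(s.tostring())                             # "abcd"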
+ """ + if self.op is Op.SYMBOL: + value = symbols_map.get(self) + if value is None: + return self + m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data) + if m: + # complement to fromstring method + items, paren = m.groups() + if paren in ['ROUNDDIV', 'SQUARE']: + return as_array(value) + assert paren == 'ROUND', (paren, value) + return value + if self.op in (Op.INTEGER, Op.REAL, Op.STRING): + return self + if self.op in (Op.ARRAY, Op.COMPLEX): + return Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data)) + if self.op is Op.CONCAT: + return normalize(Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data))) + if self.op is Op.TERMS: + r = None + for term, coeff in self.data.items(): + if r is None: + r = term.substitute(symbols_map) * coeff + else: + r += term.substitute(symbols_map) * coeff + if r is None: + ewarn('substitute: empty TERMS expression interpreted as' + ' int-literal 0') + return as_number(0) + return r + if self.op is Op.FACTORS: + r = None + for base, exponent in self.data.items(): + if r is None: + r = base.substitute(symbols_map) ** exponent + else: + r *= base.substitute(symbols_map) ** exponent + if r is None: + ewarn('substitute: empty FACTORS expression interpreted' + ' as int-literal 1') + return as_number(1) + return r + if self.op is Op.APPLY: + target, args, kwargs = self.data + if isinstance(target, Expr): + target = target.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in args) + kwargs = dict((k, v.substitute(symbols_map)) + for k, v in kwargs.items()) + return normalize(Expr(self.op, (target, args, kwargs))) + if self.op is Op.INDEXING: + func = self.data[0] + if isinstance(func, Expr): + func = func.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in self.data[1:]) + return normalize(Expr(self.op, (func,) + args)) + if self.op is Op.TERNARY: + operands = tuple(a.substitute(symbols_map) for a in self.data) + return normalize(Expr(self.op, operands)) + if self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, self.data.substitute(symbols_map))) + if self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.substitute(symbols_map) + right = right.substitute(symbols_map) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'substitute method for {self.op}: {self!r}') + + def traverse(self, visit, *args, **kwargs): + """Traverse expression tree with visit function. + + The visit function is applied to an expression with given args + and kwargs. + + Traverse call returns an expression returned by visit when not + None, otherwise return a new normalized expression with + traverse-visit sub-expressions. 
+ """ + result = visit(self, *args, **kwargs) + if result is not None: + return result + + if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL): + return self + elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY): + return normalize(Expr(self.op, tuple( + item.traverse(visit, *args, **kwargs) + for item in self.data))) + elif self.op in (Op.TERMS, Op.FACTORS): + data = {} + for k, v in self.data.items(): + k = k.traverse(visit, *args, **kwargs) + v = (v.traverse(visit, *args, **kwargs) + if isinstance(v, Expr) else v) + if k in data: + v = data[k] + v + data[k] = v + return normalize(Expr(self.op, data)) + elif self.op is Op.APPLY: + obj = self.data[0] + func = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + operands = tuple(operand.traverse(visit, *args, **kwargs) + for operand in self.data[1]) + kwoperands = dict((k, v.traverse(visit, *args, **kwargs)) + for k, v in self.data[2].items()) + return normalize(Expr(self.op, (func, operands, kwoperands))) + elif self.op is Op.INDEXING: + obj = self.data[0] + obj = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + indices = tuple(index.traverse(visit, *args, **kwargs) + for index in self.data[1:]) + return normalize(Expr(self.op, (obj,) + indices)) + elif self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, + self.data.traverse(visit, *args, **kwargs))) + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.traverse(visit, *args, **kwargs) + right = right.traverse(visit, *args, **kwargs) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'traverse method for {self.op}') + + def contains(self, other): + """Check if self contains other. + """ + found = [] + + def visit(expr, found=found): + if found: + return expr + elif expr == other: + found.append(1) + return expr + + self.traverse(visit) + + return len(found) != 0 + + def symbols(self): + """Return a set of symbols contained in self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.SYMBOL: + found.add(expr) + + self.traverse(visit) + + return found + + def polynomial_atoms(self): + """Return a set of expressions used as atoms in polynomial self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.FACTORS: + for b in expr.data: + b.traverse(visit) + return expr + if expr.op in (Op.TERMS, Op.COMPLEX): + return + if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp): + if expr.data[0] is ArithOp.POW: + expr.data[1][0].traverse(visit) + return expr + return + if expr.op in (Op.INTEGER, Op.REAL): + return expr + + found.add(expr) + + if expr.op in (Op.INDEXING, Op.APPLY): + return expr + + self.traverse(visit) + + return found + + def linear_solve(self, symbol): + """Return a, b such that a * symbol + b == self. + + If self is not linear with respect to symbol, raise RuntimeError. + """ + b = self.substitute({symbol: as_number(0)}) + ax = self - b + a = ax.substitute({symbol: as_number(1)}) + + zero, _ = as_numer_denom(a * symbol - ax) + + if zero != as_number(0): + raise RuntimeError(f'not a {symbol}-linear equation:' + f' {a} * {symbol} + {b} == {self}') + return a, b + + +def normalize(obj): + """Normalize Expr and apply basic evaluation methods. 
+ """ + if not isinstance(obj, Expr): + return obj + + if obj.op is Op.TERMS: + d = {} + for t, c in obj.data.items(): + if c == 0: + continue + if t.op is Op.COMPLEX and c != 1: + t = t * c + c = 1 + if t.op is Op.TERMS: + for t1, c1 in t.data.items(): + _pairs_add(d, t1, c1 * c) + else: + _pairs_add(d, t, c) + if len(d) == 0: + # TODO: determine correct kind + return as_number(0) + elif len(d) == 1: + (t, c), = d.items() + if c == 1: + return t + return Expr(Op.TERMS, d) + + if obj.op is Op.FACTORS: + coeff = 1 + d = {} + for b, e in obj.data.items(): + if e == 0: + continue + if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1: + # expand integer powers of sums + b = b * (b ** (e - 1)) + e = 1 + + if b.op in (Op.INTEGER, Op.REAL): + if e == 1: + coeff *= b.data[0] + elif e > 0: + coeff *= b.data[0] ** e + else: + _pairs_add(d, b, e) + elif b.op is Op.FACTORS: + if e > 0 and isinstance(e, integer_types): + for b1, e1 in b.data.items(): + _pairs_add(d, b1, e1 * e) + else: + _pairs_add(d, b, e) + else: + _pairs_add(d, b, e) + if len(d) == 0 or coeff == 0: + # TODO: determine correct kind + assert isinstance(coeff, number_types) + return as_number(coeff) + elif len(d) == 1: + (b, e), = d.items() + if e == 1: + t = b + else: + t = Expr(Op.FACTORS, d) + if coeff == 1: + return t + return Expr(Op.TERMS, {t: coeff}) + elif coeff == 1: + return Expr(Op.FACTORS, d) + else: + return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff}) + + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + dividend, divisor = obj.data[1] + t1, c1 = as_term_coeff(dividend) + t2, c2 = as_term_coeff(divisor) + if isinstance(c1, integer_types) and isinstance(c2, integer_types): + g = gcd(c1, c2) + c1, c2 = c1//g, c2//g + else: + c1, c2 = c1/c2, 1 + + if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: + numer = t1.data[1][0] * c1 + denom = t1.data[1][1] * t2 * c2 + return as_apply(ArithOp.DIV, numer, denom) + + if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV: + numer = t2.data[1][1] * t1 * c1 + denom = t2.data[1][0] * c2 + return as_apply(ArithOp.DIV, numer, denom) + + d = dict(as_factors(t1).data) + for b, e in as_factors(t2).data.items(): + _pairs_add(d, b, -e) + numer, denom = {}, {} + for b, e in d.items(): + if e > 0: + numer[b] = e + else: + denom[b] = -e + numer = normalize(Expr(Op.FACTORS, numer)) * c1 + denom = normalize(Expr(Op.FACTORS, denom)) * c2 + + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1: + # TODO: denom kind not used + return numer + return as_apply(ArithOp.DIV, numer, denom) + + if obj.op is Op.CONCAT: + lst = [obj.data[0]] + for s in obj.data[1:]: + last = lst[-1] + if ( + last.op is Op.STRING + and s.op is Op.STRING + and last.data[0][0] in '"\'' + and s.data[0][0] == last.data[0][-1] + ): + new_last = as_string(last.data[0][:-1] + s.data[0][1:], + max(last.data[1], s.data[1])) + lst[-1] = new_last + else: + lst.append(s) + if len(lst) == 1: + return lst[0] + return Expr(Op.CONCAT, tuple(lst)) + + if obj.op is Op.TERNARY: + cond, expr1, expr2 = map(normalize, obj.data) + if cond.op is Op.INTEGER: + return expr1 if cond.data[0] else expr2 + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + return obj + + +def as_expr(obj): + """Convert non-Expr objects to Expr objects. 
+ """ + if isinstance(obj, complex): + return as_complex(obj.real, obj.imag) + if isinstance(obj, number_types): + return as_number(obj) + if isinstance(obj, str): + # STRING expression holds string with boundary quotes, hence + # applying repr: + return as_string(repr(obj)) + if isinstance(obj, tuple): + return tuple(map(as_expr, obj)) + return obj + + +def as_symbol(obj): + """Return object as SYMBOL expression (variable or unparsed expression). + """ + return Expr(Op.SYMBOL, obj) + + +def as_number(obj, kind=4): + """Return object as INTEGER or REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op in (Op.INTEGER, Op.REAL): + return obj + raise OpError(f'cannot convert {obj} to INTEGER or REAL constant') + + +def as_integer(obj, kind=4): + """Return object as INTEGER constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.INTEGER: + return obj + raise OpError(f'cannot convert {obj} to INTEGER constant') + + +def as_real(obj, kind=4): + """Return object as REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.REAL, (float(obj), kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.REAL: + return obj + elif obj.op is Op.INTEGER: + return Expr(Op.REAL, (float(obj.data[0]), kind)) + raise OpError(f'cannot convert {obj} to REAL constant') + + +def as_string(obj, kind=1): + """Return object as STRING expression (string literal constant). + """ + return Expr(Op.STRING, (obj, kind)) + + +def as_array(obj): + """Return object as ARRAY expression (array constant). + """ + if isinstance(obj, Expr): + obj = obj, + return Expr(Op.ARRAY, obj) + + +def as_complex(real, imag=0): + """Return object as COMPLEX expression (complex literal constant). + """ + return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag))) + + +def as_apply(func, *args, **kwargs): + """Return object as APPLY expression (function call, constructor, etc.) + """ + return Expr(Op.APPLY, + (func, tuple(map(as_expr, args)), + dict((k, as_expr(v)) for k, v in kwargs.items()))) + + +def as_ternary(cond, expr1, expr2): + """Return object as TERNARY expression (cond?expr1:expr2). + """ + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + +def as_ref(expr): + """Return object as referencing expression. + """ + return Expr(Op.REF, expr) + + +def as_deref(expr): + """Return object as dereferencing expression. + """ + return Expr(Op.DEREF, expr) + + +def as_eq(left, right): + return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) + + +def as_ne(left, right): + return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) + + +def as_lt(left, right): + return Expr(Op.RELATIONAL, (RelOp.LT, left, right)) + + +def as_le(left, right): + return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) + + +def as_gt(left, right): + return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) + + +def as_ge(left, right): + return Expr(Op.RELATIONAL, (RelOp.GE, left, right)) + + +def as_terms(obj): + """Return expression as TERMS expression. 
+ """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.TERMS: + return obj + if obj.op is Op.INTEGER: + return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]}) + if obj.op is Op.REAL: + return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]}) + return Expr(Op.TERMS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_factors(obj): + """Return expression as FACTORS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.FACTORS: + return obj + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + if coeff == 1: + return Expr(Op.FACTORS, {term: 1}) + return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) + if ((obj.op is Op.APPLY + and obj.data[0] is ArithOp.DIV + and not obj.data[2])): + return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) + return Expr(Op.FACTORS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_term_coeff(obj): + """Return expression as term-coefficient pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.INTEGER: + return as_integer(1, obj.data[1]), obj.data[0] + if obj.op is Op.REAL: + return as_real(1, obj.data[1]), obj.data[0] + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + return term, coeff + # TODO: find common divisor of coefficients + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + t, c = as_term_coeff(obj.data[1][0]) + return as_apply(ArithOp.DIV, t, obj.data[1][1]), c + return obj, 1 + raise OpError(f'cannot convert {type(obj)} to term and coeff') + + +def as_numer_denom(obj): + """Return expression as numer-denom pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL, + Op.INDEXING, Op.TERNARY): + return obj, as_number(1) + elif obj.op is Op.APPLY: + if obj.data[0] is ArithOp.DIV and not obj.data[2]: + numers, denoms = map(as_numer_denom, obj.data[1]) + return numers[0] * denoms[1], numers[1] * denoms[0] + return obj, as_number(1) + elif obj.op is Op.TERMS: + numers, denoms = [], [] + for term, coeff in obj.data.items(): + n, d = as_numer_denom(term) + n = n * coeff + numers.append(n) + denoms.append(d) + numer, denom = as_number(0), as_number(1) + for i in range(len(numers)): + n = numers[i] + for j in range(len(numers)): + if i != j: + n *= denoms[j] + numer += n + denom *= denoms[i] + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0: + numer, denom = -numer, -denom + return numer, denom + elif obj.op is Op.FACTORS: + numer, denom = as_number(1), as_number(1) + for b, e in obj.data.items(): + bnumer, bdenom = as_numer_denom(b) + if e > 0: + numer *= bnumer ** e + denom *= bdenom ** e + elif e < 0: + numer *= bdenom ** (-e) + denom *= bnumer ** (-e) + return numer, denom + raise OpError(f'cannot convert {type(obj)} to numer and denom') + + +def _counter(): + # Used internally to generate unique dummy symbols + counter = 0 + while True: + counter += 1 + yield counter + + +COUNTER = _counter() + + +def eliminate_quotes(s): + """Replace quoted substrings of input string. + + Return a new string and a mapping of replacements. 
+ """ + d = {} + + def repl(m): + kind, value = m.groups()[:2] + if kind: + # remove trailing underscore + kind = kind[:-1] + p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]] + k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@' + d[k] = value + return k + + new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format( + kind=r'\w[\w\d_]*', + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")'), + repl, s) + + assert '"' not in new_s + assert "'" not in new_s + + return new_s, d + + +def insert_quotes(s, d): + """Inverse of eliminate_quotes. + """ + for k, v in d.items(): + kind = k[:k.find('@')] + if kind: + kind += '_' + s = s.replace(k, kind + v) + return s + + +def replace_parenthesis(s): + """Replace substrings of input that are enclosed in parenthesis. + + Return a new string and a mapping of replacements. + """ + # Find a parenthesis pair that appears first. + + # Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`. + # We don't handle `/` deliminator because it is not a part of an + # expression. + left, right = None, None + mn_i = len(s) + for left_, right_ in (('(/', '/)'), + '()', + '{}', # to support C literal structs + '[]'): + i = s.find(left_) + if i == -1: + continue + if i < mn_i: + mn_i = i + left, right = left_, right_ + + if left is None: + return s, {} + + i = mn_i + j = s.find(right, i) + + while s.count(left, i + 1, j) != s.count(right, i + 1, j): + j = s.find(right, j + 1) + if j == -1: + raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}') + + p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] + + k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' + v = s[i+len(left):j] + r, d = replace_parenthesis(s[j+len(right):]) + d[k] = v + return s[:i] + k + r, d + + +def _get_parenthesis_kind(s): + assert s.startswith('@__f2py_PARENTHESIS_'), s + return s.split('_')[4] + + +def unreplace_parenthesis(s, d): + """Inverse of replace_parenthesis. + """ + for k, v in d.items(): + p = _get_parenthesis_kind(k) + left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p] + right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p] + s = s.replace(k, left + v + right) + return s + + +def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`') + + +class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. 
+
+        The context may define the result in case of ambiguous
+        expressions. For instance, consider expressions `f(x, y)` and
+        `(x, y) + (a, b)` where `f` is a function and the pair `(x, y)`
+        denotes a complex number. Specifying the context as "args" or
+        "expr", the subexpression `(x, y)` will be parsed into an
+        argument list or into a complex number, respectively.
+        """
+        if isinstance(s, (list, tuple)):
+            return type(s)(self.process(s_, context) for s_ in s)
+
+        assert isinstance(s, str), (type(s), s)
+
+        # replace subexpressions in parenthesis with f2py @-names
+        r, raw_symbols_map = replace_parenthesis(s)
+        r = r.strip()
+
+        def restore(r):
+            # restores subexpressions marked with f2py @-names
+            if isinstance(r, (list, tuple)):
+                return type(r)(map(restore, r))
+            return unreplace_parenthesis(r, raw_symbols_map)
+
+        # comma-separated tuple
+        if ',' in r:
+            operands = restore(r.split(','))
+            if context == 'args':
+                return tuple(self.process(operands))
+            if context == 'expr':
+                if len(operands) == 2:
+                    # complex number literal
+                    return as_complex(*self.process(operands))
+            raise NotImplementedError(
+                f'parsing comma-separated list (context={context}): {r}')
+
+        # ternary operation
+        m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
+        if m:
+            assert context == 'expr', context
+            oper, expr1, expr2 = restore(m.groups())
+            oper = self.process(oper)
+            expr1 = self.process(expr1)
+            expr2 = self.process(expr2)
+            return as_ternary(oper, expr1, expr2)
+
+        # relational expression
+        if self.language is Language.Fortran:
+            m = re.match(
+                r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
+        else:
+            m = re.match(
+                r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
+        if m:
+            left, rop, right = m.groups()
+            if self.language is Language.Fortran:
+                rop = '.' + rop + '.'
+            left, right = self.process(restore((left, right)))
+            rop = RelOp.fromstring(rop, language=self.language)
+            return Expr(Op.RELATIONAL, (rop, left, right))
+
+        # keyword argument
+        m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
+        if m:
+            keyname, value = m.groups()
+            value = restore(value)
+            return _Pair(keyname, self.process(value))
+
+        # addition/subtraction operations
+        operands = re.split(r'((?<!\d[edED])[+-])', r)
+        if len(operands) > 1:
+            result = self.process(restore(operands[0] or '0'))
+            for op, operand in zip(operands[1::2], operands[2::2]):
+                operand = self.process(restore(operand))
+                op = op.strip()
+                if op == '+':
+                    result += operand
+                else:
+                    assert op == '-'
+                    result -= operand
+            return result
+
+        # string concatenate operation
+        if self.language is Language.Fortran and '//' in r:
+            operands = restore(r.split('//'))
+            return Expr(Op.CONCAT,
+                        tuple(self.process(operands)))
+
+        # multiplication/division operations
+        operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
+                            (r if self.language is Language.C
+                             else r.replace('**', '@__f2py_DOUBLE_STAR@')))
+        if len(operands) > 1:
+            operands = restore(operands)
+            if self.language is not Language.C:
+                operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
+                            for operand in operands]
+            # Expression is an arithmetic product
+            result = self.process(operands[0])
+            for op, operand in zip(operands[1::2], operands[2::2]):
+                operand = self.process(operand)
+                op = op.strip()
+                if op == '*':
+                    result *= operand
+                else:
+                    assert op == '/'
+                    result /= operand
+            return result
+
+        # referencing/dereferencing
+        if r.startswith('*') or r.startswith('&'):
+            op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
+            operand = self.process(restore(r[1:]))
+            return Expr(op, operand)
+
+        # exponentiation operations
+        if self.language is not Language.C and '**' in r:
+            operands = list(reversed(restore(r.split('**'))))
+            result = self.process(operands[0])
+            for operand in operands[1:]:
+                operand = self.process(operand)
+                result = operand ** result
+            return result
+
+        # int-literal-constant
+        m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
+            digit_string=r'\d+',
+            kind=r'_(\d+|\w[\w\d_]*)'), r)
+        if m:
+            value, _, kind = m.groups()
+            if kind and kind.isdigit():
+                kind = int(kind)
+            return as_integer(int(value), kind or 4)
+
+        # real-literal-constant
+        m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
+                     .format(
+                         significant=r'[.]\d+|\d+[.]\d*',
+                         exponent=r'[edED][+-]?\d+',
+                         kind=r'_(\d+|\w[\w\d_]*)'), r)
+        if m:
+            value, _, _, kind = m.groups()
+            if kind and kind.isdigit():
+                kind = int(kind)
+            value = value.lower()
+            if 'd' in value:
+                return as_real(float(value.replace('d', 'e')), kind or 8)
+            return as_real(float(value), kind or 4)
+
+        # string-literal-constant with kind parameter specification
+        if r in self.quotes_map:
+            kind = r[:r.find('@')]
+            return as_string(self.quotes_map[r], kind or 1)
+
+        # array constructor or literal complex constant or
+        # parenthesized expression
+        if r in raw_symbols_map:
+            paren = _get_parenthesis_kind(r)
+            items = self.process(restore(raw_symbols_map[r]),
+                                 'expr' if paren == 'ROUND' else 'args')
+            if paren == 'ROUND':
+                if isinstance(items, Expr):
+                    return items
+            if paren in ['ROUNDDIV', 'SQUARE']:
+                # Expression is an array constructor
+                if isinstance(items, Expr):
+                    items = (items,)
+                return as_array(items)
+
+        # function call/indexing
+        m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
+                     r)
+        if m:
+            target, args, paren = m.groups()
+            target = self.process(restore(target))
+            args = self.process(restore(args)[1:-1], 'args')
+            if not isinstance(args, tuple):
+                args = args,
+            if paren == 'ROUND':
+                kwargs = dict((a.left, a.right) for a in args
+                              if isinstance(a, _Pair))
+                args = tuple(a for a in args if not isinstance(a, _Pair))
+                # Warning: this could also be a Fortran indexing operation.
+                return as_apply(target, *args, **kwargs)
+            else:
+                # Expression is a C/Python indexing operation
+                # (e.g.
used in .pyf files) + assert paren == 'SQUARE' + return target[args] + + # Fortran standard conforming identifier + m = re.match(r'\A\w[\w\d_]*\Z', r) + if m: + return as_symbol(r) + + # fall-back to symbol + r = self.finalize_string(restore(r)) + ewarn( + f'fromstring: treating {r!r} as symbol (original={self.original})') + return as_symbol(r) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__init__.py b/phivenv/Lib/site-packages/numpy/f2py/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..42fb1762d278e24cc6a2f42465202adb98616bf7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/__init__.py @@ -0,0 +1,8 @@ +from numpy.testing import IS_WASM +import pytest + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2da789cee62be0ce0f22f00c29a81ff80d40cf1a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8650631a9395aaf5810743be8144f2f8a0f984c3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ef3783ad0565100687891e109352c811ea2739d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c69ee3b1406ffe13a29ed7308e0b252ac9d6ae17 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd9bbbbea8f8f497959676bafd3571574dd57094 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d07b7246fbf0c6ba1c8654af0c4eaf77fc8c9472 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-39.pyc 
b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f84c874ddd757672babe9314b8925e2e763df70d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_character.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3978b54e55520b3db61515d3e00f1b9c498d2bc Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1c94b43025e7d5ec76fd5afb04e2da7a4e51565 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2acdd4697137f9b490347f4c15e29fbcacc82bb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_data.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60e5eb7a6824018a26bba4ffaf2584e9f563fde0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_docs.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7422954c76327568a0ded13c3458625fbf9fd2cb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_f2cmap.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f0f3bc9a9fbf9c218129d0a29290c96644246f9 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_f2py2e.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a512a6d91d7a9d939cc6be89a2eb64a069fe31c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_isoc.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db83ff491b069ebb8e23910a41800b31c3a9548b Binary files /dev/null and 
b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72bbd87edf68c57c1d0fb677c741b70675f31bb7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..039863814a895cb078575ddc881853251834985d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_modules.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db3578994a85e69900e7c896954c4d4891f288bb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f6264a252d748a7380be11a848ae4fa3a8c9e93 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_pyf_src.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3970a472c34a846eeb07f32ff4f84f7915073c5a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..351d108bdd57d9503071172ee94fb6854d552f76 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8ceb7c4bda4eb032808d6a60a950b19c41fd780 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a0b344be64daa374dc494dcecff96abbcb55f0a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db73505a6cfcef6490b9517f4797665431cb9abc Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d9b12eaddfc8e67434d00187262bafbc77372b3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..917dbcf65d2d2f0e04fc00564f13c189e0d8be89 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c29287632be3039634b8fce2de7b266b38ec9277 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..776e0cdaa25cc4c50705b10d47b5d85e1b4e74ce Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe40dde44033ac3798df0a360eecfb4a62a0cc98 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97300d1752a8e1aa4072b9606c877e5e1f91b16d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_symbolic.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e98d0d36926f7b08d56bcece36b70f54bc6ac74 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/util.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/util.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d6653763e4923aef804338faad9c2ef1a3dbdd88 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/f2py/tests/__pycache__/util.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 new file mode 100644 index 0000000000000000000000000000000000000000..af0ae295a2da50917e3b0ee8e86577b2a6d09139 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 @@ -0,0 +1,34 @@ +module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + +contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine +end module + +subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y +end subroutine + +subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 new file mode 100644 index 0000000000000000000000000000000000000000..b37c941e9a29304bd4f5174b18721bff8c137ae3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 @@ -0,0 +1,6 @@ +module test + abstract interface + subroutine foo() + end subroutine + end interface +end module test diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/phivenv/Lib/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 0000000000000000000000000000000000000000..2df74846bd9727f2acfc107ccb0d207688f66c63 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,230 @@ +/* + * This file was auto-generated with f2py (version:2_1330) and hand edited by + * Pearu for testing purposes. Do not edit this file unless you know what you + * are doing!!! 
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*********************** See f2py2e/cfuncs.py: includes ***********************/
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "fortranobject.h"
+#include <math.h>
+
+static PyObject *wrap_error;
+static PyObject *wrap_module;
+
+/************************************ call ************************************/
+static char doc_f2py_rout_wrap_call[] = "\
+Function signature:\n\
+  arr = call(type_num,dims,intent,obj)\n\
+Required arguments:\n"
+"  type_num : input int\n"
+"  dims : input int-sequence\n"
+"  intent : input int\n"
+"  obj : input python object\n"
+"Return objects:\n"
+"  arr : array";
+static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
+                                     PyObject *capi_args) {
+  PyObject * volatile capi_buildvalue = NULL;
+  int type_num = 0;
+  int elsize = 0;
+  npy_intp *dims = NULL;
+  PyObject *dims_capi = Py_None;
+  int rank = 0;
+  int intent = 0;
+  PyArrayObject *capi_arr_tmp = NULL;
+  PyObject *arr_capi = Py_None;
+  int i;
+
+  if (!PyArg_ParseTuple(capi_args,"iiOiO|:wrap.call",\
+                        &type_num,&elsize,&dims_capi,&intent,&arr_capi))
+    return NULL;
+  rank = PySequence_Length(dims_capi);
+  dims = malloc(rank*sizeof(npy_intp));
+  for (i=0;ikind,
+                       PyArray_DESCR(arr)->type,
+                       PyArray_TYPE(arr),
+                       PyArray_ITEMSIZE(arr),
+                       PyDataType_ALIGNMENT(arr),
+                       PyArray_FLAGS(arr),
+                       PyArray_ITEMSIZE(arr));
+}
+
+static PyMethodDef f2py_module_methods[] = {
+
+  {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call},
+  {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs},
+  {NULL,NULL}
+};
+
+static struct PyModuleDef moduledef = {
+  PyModuleDef_HEAD_INIT,
+  "test_array_from_pyobj_ext",
+  NULL,
+  -1,
+  f2py_module_methods,
+  NULL,
+  NULL,
+  NULL,
+  NULL
+};
+
+PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) {
+  PyObject *m,*d, *s;
+  m = wrap_module = PyModule_Create(&moduledef);
+  Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+  import_array();
+  if (PyErr_Occurred())
+    Py_FatalError("can't initialize module wrap (failed to import numpy)");
+  d = PyModule_GetDict(m);
+  s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
+                           "  arr = call(type_num,dims,intent,obj)\n"
+                           ".");
+  PyDict_SetItemString(d, "__doc__", s);
+  wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
+  Py_DECREF(s);
+
+#define ADDCONST(NAME, CONST) \
+  s = PyLong_FromLong(CONST); \
+  PyDict_SetItemString(d, NAME, s); \
+  Py_DECREF(s)
+
+  ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN);
+  ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT);
+  ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT);
+  ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE);
+  ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE);
+  ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY);
+  ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C);
+  ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL);
+  ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE);
+  ADDCONST("NPY_BOOL", NPY_BOOL);
+  ADDCONST("NPY_BYTE", NPY_BYTE);
+  ADDCONST("NPY_UBYTE", NPY_UBYTE);
+  ADDCONST("NPY_SHORT", NPY_SHORT);
+  ADDCONST("NPY_USHORT", NPY_USHORT);
+  ADDCONST("NPY_INT", NPY_INT);
+  ADDCONST("NPY_UINT", NPY_UINT);
+  ADDCONST("NPY_INTP", NPY_INTP);
+  ADDCONST("NPY_UINTP", NPY_UINTP);
+  ADDCONST("NPY_LONG", NPY_LONG);
+  ADDCONST("NPY_ULONG", NPY_ULONG);
+  ADDCONST("NPY_LONGLONG", NPY_LONGLONG);
+  ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG);
+  ADDCONST("NPY_FLOAT", NPY_FLOAT);
+  ADDCONST("NPY_DOUBLE", NPY_DOUBLE);
+  ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE);
+  ADDCONST("NPY_CFLOAT", NPY_CFLOAT);
ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES_LEGACY", NPY_NTYPES_LEGACY); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST( + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + + return m; +} +#ifdef __cplusplus +} +#endif diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 0000000000000000000000000000000000000000..273c177824c9ca8fea68791e4ba44c5058a79f6d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bb7822023363bab9bfcf4d5b29eec5f231e523b9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 0000000000000000000000000000000000000000..d6da9f4b8bed19b3c84538ae0bdf232e66498fb7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end 
module mod diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 0000000000000000000000000000000000000000..992147c7bb23ed65bf1a43b431e863abafc4cbd6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8072a240ab4e1cccef43b060e13738eb45a5563d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/block_docstring/foo.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/block_docstring/foo.f new file mode 100644 index 0000000000000000000000000000000000000000..aecd66e8e20a5d3cee1765d4d42123697f554fd4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/block_docstring/foo.f @@ -0,0 +1,6 @@ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/foo.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/foo.f new file mode 100644 index 0000000000000000000000000000000000000000..1ecd6d476577a7369c08d2b4bb7e7efb0383d24a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/foo.f @@ -0,0 +1,62 @@ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 new file mode 100644 index 0000000000000000000000000000000000000000..0c1d503eddf352ea9ab471fd437859d2ded6f708 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 @@ -0,0 +1,7 @@ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 diff --git 
a/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 new file mode 100644 index 0000000000000000000000000000000000000000..e758b0d9d15a53c1be633484365bcd1f6b0f798d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 @@ -0,0 +1,17 @@ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh25211.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh25211.f new file mode 100644 index 0000000000000000000000000000000000000000..08d85c7daf850621b7ee680efa2035d438dea05e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh25211.f @@ -0,0 +1,10 @@ + SUBROUTINE FOO(FUN,R) + EXTERNAL FUN + INTEGER I + REAL*8 R, FUN +Cf2py intent(out) r + R = 0D0 + DO I=-5,5 + R = R + FUN(I) + ENDDO + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf new file mode 100644 index 0000000000000000000000000000000000000000..dd221f970dee978499d8b728f89e0bc1b896c3d3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf @@ -0,0 +1,18 @@ +python module __user__routines + interface + function fun(i) result (r) + integer :: i + real*8 :: r + end function fun + end interface +end python module __user__routines + +python module callback2 + interface + subroutine foo(f,r) + use __user__routines, f=>fun + external f + real*8 intent(out) :: r + end subroutine foo + end interface +end python module callback2 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf b/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf new file mode 100644 index 0000000000000000000000000000000000000000..b79e727e2b9f472b354e4d409a877a7a42d4ec0a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf @@ -0,0 +1,6 @@ +python module test_22819 + interface + subroutine hello() + end subroutine hello + end interface +end python module test_22819 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/hi77.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/hi77.f new file mode 100644 index 0000000000000000000000000000000000000000..efdf1de677719c81bf19c01c8adb3b53841cf400 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/hi77.f @@ -0,0 +1,3 @@ + SUBROUTINE HI + PRINT*, "HELLO WORLD" + END SUBROUTINE diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8f390ee3a29bc460c36edafd3ea27e9df6bb08bf --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 @@ -0,0 +1,3 @@ +function hi() + print*, "Hello World" +end function diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/common/block.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/common/block.f new file mode 100644 index 0000000000000000000000000000000000000000..32a26667d520a782f4be75d3c578857e92c46211 --- /dev/null +++ 
b/phivenv/Lib/site-packages/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/common/gh19161.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/common/gh19161.f90 new file mode 100644 index 0000000000000000000000000000000000000000..3b5e9b6d3f9ff0466db5e0bbbe2be82d39b61326 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/common/gh19161.f90 @@ -0,0 +1,10 @@ +module typedefmod + use iso_fortran_env, only: real32 +end module typedefmod + +module data + use typedefmod, only: real32 + implicit none + real(kind=real32) :: x + common/test/x +end module data diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 new file mode 100644 index 0000000000000000000000000000000000000000..9cc30aa0376eaeff23edeb85469c14f9e1694922 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 @@ -0,0 +1,13 @@ +module foo + public + type, private, bind(c) :: a + integer :: i + end type a + type, bind(c) :: b_ + integer :: j + end type b_ + public :: b_ + type :: c + integer :: k + end type c +end module foo diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f new file mode 100644 index 0000000000000000000000000000000000000000..ffb05100e5834841a6eeddaeabe50f8adf578770 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYDATA + DATA MYDATA /0/ + END + SUBROUTINE SUB1 + COMMON /MYCOM/ MYDATA + MYDATA = MYDATA + 1 + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f new file mode 100644 index 0000000000000000000000000000000000000000..420db208cb5d0552a3a52bb6e6ef52d16dd840f2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f @@ -0,0 +1,5 @@ + BLOCK DATA MYBLK + IMPLICIT DOUBLE PRECISION (A-H,O-Z) + COMMON /MYCOM/ IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 + DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /2*3,2*2,0.0D0/ + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 new file mode 100644 index 0000000000000000000000000000000000000000..b0e1207cdda6676c4addf2c9a4f8445fb8b38dd6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 @@ -0,0 +1,20 @@ +! 
gh-23276 +module cmplxdat + implicit none + integer :: i, j + real :: x, y + real, dimension(2) :: z + real(kind=8) :: pi + complex(kind=8), target :: medium_ref_index + complex(kind=8), target :: ref_index_one, ref_index_two + complex(kind=8), dimension(2) :: my_array + real(kind=8), dimension(3) :: my_real_array = (/1.0d0, 2.0d0, 3.0d0/) + + data i, j / 2, 3 / + data x, y / 1.5, 2.0 / + data z / 3.5, 7.0 / + data medium_ref_index / (1.d0, 0.d0) / + data ref_index_one, ref_index_two / (13.0d0, 21.0d0), (-30.0d0, 43.0d0) / + data my_array / (1.0d0, 2.0d0), (-3.0d0, 4.0d0) / + data pi / 3.1415926535897932384626433832795028841971693993751058209749445923078164062d0 / +end module cmplxdat diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f new file mode 100644 index 0000000000000000000000000000000000000000..c6d4c34e33979e6249cef9e4af4d6b9372013b9d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYTAB + INTEGER MYTAB(3) + DATA MYTAB/ + * 0, ! 1 and more commenty stuff + * 4, ! 2 + * 0 / + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 new file mode 100644 index 0000000000000000000000000000000000000000..a2d1d8769f47365051a6945f3d348196b960099c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 @@ -0,0 +1,6 @@ +module foo + type bar + character(len = 4) :: text + end type bar + type(bar), parameter :: abar = bar('abar') +end module foo diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f new file mode 100644 index 0000000000000000000000000000000000000000..12535e388084d0d720a5c1ebcaa2d3065a64bfd8 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f @@ -0,0 +1,16 @@ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f new file mode 100644 index 0000000000000000000000000000000000000000..23b872842fbad90e5f1fdfdb335270113de5f43b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f @@ -0,0 +1,12 @@ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf new file mode 100644 index 0000000000000000000000000000000000000000..6c93b48cae95336e1281848f04f5374fef856450 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf @@ -0,0 +1,7 @@ +python module iri16py ! in + interface ! in :iri16py + block data ! 
in :iri16py:iridreg_modified.for + COMMON /fircom/ eden,tabhe,tabla,tabmo,tabza,tabfl + end block data + end interface +end python module iri16py diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f new file mode 100644 index 0000000000000000000000000000000000000000..d1515e3a0dce2edeb5aba0b364978a00c6a4fe77 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f @@ -0,0 +1,5 @@ + SUBROUTINE EXAMPLE( ) + IF( .TRUE. ) THEN + CALL DO_SOMETHING() + END IF ! ** .TRUE. ** + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 new file mode 100644 index 0000000000000000000000000000000000000000..dfabde2024698e5a6609f3586a23ade19ec40460 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 @@ -0,0 +1,4 @@ +integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b +end function diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 new file mode 100644 index 0000000000000000000000000000000000000000..a8bed3f0798d8548609a06e2b2906c8b7c769a01 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 @@ -0,0 +1,11 @@ +module test_bug + implicit none + private + public :: intproduct + +contains + integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b + end function +end module diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 new file mode 100644 index 0000000000000000000000000000000000000000..1b39eb656de6277b80da0f0e3b8a74b0906edb92 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 @@ -0,0 +1,20 @@ +module gh23879 + implicit none + private + public :: foo + + contains + + subroutine foo(a, b) + integer, intent(in) :: a + integer, intent(out) :: b + b = a + call bar(b) + end subroutine + + subroutine bar(x) + integer, intent(inout) :: x + x = 2*x + end subroutine + + end module gh23879 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bd748996d58227327d56a6b4fca9a40d5dee7bcb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 @@ -0,0 +1,13 @@ + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 new file mode 100644 index 0000000000000000000000000000000000000000..83481c8e228cb78fdbb1fae50c309b6602d9e1b7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 @@ -0,0 +1,49 @@ +module foo + type bar + character(len = 32) :: item + end type bar + interface operator(.item.) + module procedure item_int, item_real + end interface operator(.item.) 
+ interface operator(==) + module procedure items_are_equal + end interface operator(==) + interface assignment(=) + module procedure get_int, get_real + end interface assignment(=) +contains + function item_int(val) result(elem) + integer, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(I32)") val + end function item_int + + function item_real(val) result(elem) + real, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(1PE32.12)") val + end function item_real + + function items_are_equal(val1, val2) result(equal) + type(bar), intent(in) :: val1, val2 + logical :: equal + + equal = (val1%item == val2%item) + end function items_are_equal + + subroutine get_real(rval, item) + real, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_real + + subroutine get_int(rval, item) + integer, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_int +end module foo diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 new file mode 100644 index 0000000000000000000000000000000000000000..ad88a2ead99e5406f036cafc2b182a7292cd0098 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 @@ -0,0 +1,11 @@ +module foo + private + integer :: a + public :: setA + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 new file mode 100644 index 0000000000000000000000000000000000000000..f108d057c5a3a1cfdf7b6b2492dd13467165c1c7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + public :: setA +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 new file mode 100644 index 0000000000000000000000000000000000000000..e3993c161d1cf611355ee1e953d7c0f17b033b18 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 new file mode 100644 index 0000000000000000000000000000000000000000..f7b4f4f1481df6c91d6c3b393c612d41c3414861 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 @@ -0,0 +1,4 @@ +subroutine foo(x) + real(8), intent(in) :: x + ! 
Écrit à l'écran la valeur de x +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap b/phivenv/Lib/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap new file mode 100644 index 0000000000000000000000000000000000000000..36da2dda79d828678a05e5a1f9a96849f675d73f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(real32='float', real64='double'), integer=dict(int64='long_long')) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 new file mode 100644 index 0000000000000000000000000000000000000000..f1ba041b8e359494009a2791f7429bfaec1e43d7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 @@ -0,0 +1,9 @@ + subroutine func1(n, x, res) + use, intrinsic :: iso_fortran_env, only: int64, real64 + implicit none + integer(int64), intent(in) :: n + real(real64), intent(in) :: x(n) + real(real64), intent(out) :: res +!f2py intent(hide) :: n + res = sum(x) + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bc562528d1c129483a7971556f0c828014299674 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -0,0 +1,34 @@ + module coddity + use iso_c_binding, only: c_double, c_int, c_int64_t + implicit none + contains + subroutine c_add(a, b, c) bind(c, name="c_add") + real(c_double), intent(in) :: a, b + real(c_double), intent(out) :: c + c = a + b + end subroutine c_add + ! gh-9693 + function wat(x, y) result(z) bind(c) + integer(c_int), intent(in) :: x, y + integer(c_int) :: z + + z = x + 7 + end function wat + ! gh-25207 + subroutine c_add_int64(a, b, c) bind(c) + integer(c_int64_t), intent(in) :: a, b + integer(c_int64_t), intent(out) :: c + c = a + b + end subroutine c_add_int64 + ! 
gh-25207 + subroutine add_arr(A, B, C) + integer(c_int64_t), intent(in) :: A(3) + integer(c_int64_t), intent(in) :: B(3) + integer(c_int64_t), intent(out) :: C(3) + integer :: j + + do j = 1, 3 + C(j) = A(j)+B(j) + end do + end subroutine + end module coddity diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 0000000000000000000000000000000000000000..57b8b378a32f45c9b6f3db12c12ec03e94cb90ee --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 0000000000000000000000000000000000000000..a77d1e09e4b348daf854cd508bf7f05b5cc8b5be --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 0000000000000000000000000000000000000000..334133eb5808b45268747eb007ac983f0ab01efa --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 0000000000000000000000000000000000000000..5bfc3d262127be96bb7c442b9d35e9498278eb24 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90 new file mode 100644 index 0000000000000000000000000000000000000000..84c708bd5da207295c7cd2a0d1ebe333a963a063 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/gh25337/data.f90 @@ -0,0 +1,8 @@ +module data + real(8) :: shift +contains + subroutine set_shift(in_shift) + real(8), intent(in) :: in_shift + shift = in_shift + end subroutine set_shift +end module data diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 new file mode 100644 index 0000000000000000000000000000000000000000..50c7df148a4d7115ccd26f32e8fb9de550d1d590 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 @@ -0,0 +1,6 @@ +subroutine shift_a(dim_a, a) + use data, only: shift + integer, intent(in) :: dim_a + real(8), intent(inout), dimension(dim_a) :: a + a = a + shift +end subroutine shift_a diff --git 
a/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 new file mode 100644 index 0000000000000000000000000000000000000000..3a6d2199124d22be8b11fc0cb96e4257700e4b37 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 @@ -0,0 +1,12 @@ +module mod + integer :: i + integer :: x(4) + real, dimension(2,3) :: a + real, allocatable, dimension(:,:) :: b +contains + subroutine foo + integer :: k + k = 1 + a(1,2) = a(1,2)+3 + end subroutine foo +end module mod diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/use_modules.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/use_modules.f90 new file mode 100644 index 0000000000000000000000000000000000000000..6d6687c2da9607f306fb470e5a7eeb34fb32707b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/modules/use_modules.f90 @@ -0,0 +1,20 @@ +module mathops + implicit none +contains + function add(a, b) result(c) + integer, intent(in) :: a, b + integer :: c + c = a + b + end function add +end module mathops + +module useops + use mathops, only: add + implicit none +contains + function sum_and_double(a, b) result(d) + integer, intent(in) :: a, b + integer :: d + d = 2 * add(a, b) + end function sum_and_double +end module useops diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 new file mode 100644 index 0000000000000000000000000000000000000000..66501639a7b2a2259c10cd8e7cef01e58033bc2e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 @@ -0,0 +1,7 @@ +subroutine foo(is_, ie_, arr, tout) + implicit none + integer :: is_,ie_ + real, intent(in) :: arr(is_:ie_) + real, intent(out) :: tout(is_:ie_) + tout = arr +end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90 new file mode 100644 index 0000000000000000000000000000000000000000..80dce540c4ccf3c45a43aa85fc5865266918836d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_array.f90 @@ -0,0 +1,45 @@ +! Check that parameter arrays are correctly intercepted. 
+subroutine foo_array(x, y, z) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: pa = 2 + integer, parameter :: intparamarray(2) = (/ 3, 5 /) + integer, dimension(pa), parameter :: pb = (/ 2, 10 /) + integer, parameter, dimension(intparamarray(1)) :: pc = (/ 2, 10, 20 /) + real(dp), parameter :: doubleparamarray(3) = (/ 3.14_dp, 4._dp, 6.44_dp /) + real(dp), intent(inout) :: x(intparamarray(1)) + real(dp), intent(inout) :: y(intparamarray(2)) + real(dp), intent(out) :: z + + x = x/pb(2) + y = y*pc(2) + z = doubleparamarray(1)*doubleparamarray(2) + doubleparamarray(3) + + return +end subroutine + +subroutine foo_array_any_index(x, y) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter, dimension(-1:1) :: myparamarray = (/ 6, 3, 1 /) + integer, parameter, dimension(2) :: nested = (/ 2, 0 /) + integer, parameter :: dim = 2 + real(dp), intent(in) :: x(myparamarray(-1)) + real(dp), intent(out) :: y(nested(1), myparamarray(nested(dim))) + + y = reshape(x, (/nested(1), myparamarray(nested(2))/)) + + return +end subroutine + +subroutine foo_array_delims(x) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter, dimension(2) :: myparamarray = (/ (6), 1 /) + integer, parameter, dimension(3) :: test = (/2, 1, (3)/) + real(dp), intent(out) :: x + + x = myparamarray(1)+test(3) + + return +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 new file mode 100644 index 0000000000000000000000000000000000000000..b16af3e8bb5c533c6ef5a051537e471565ca4337 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 @@ -0,0 +1,57 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3._sp + real(dp), parameter :: three_d = 3._dp + integer(ii), parameter :: three_i = 3_ii + integer(il), parameter :: three_l = 3_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + + +subroutine foo_no(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3. + real(dp), parameter :: three_d = 3.
+ integer(ii), parameter :: three_i = 3 + integer(il), parameter :: three_l = 3 + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + +subroutine foo_sum(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 2._sp + 1._sp + real(dp), parameter :: three_d = 1._dp + 2._dp + integer(ii), parameter :: three_i = 2_ii + 1_ii + integer(il), parameter :: three_l = 1_il + 2_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8dbe74de4c1fafb66ca5ed08fbeebc4b36c4926b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 @@ -0,0 +1,15 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + integer(ii), parameter :: two = 2_ii + integer(ii), parameter :: six = three * 1_ii * two + + x(1) = x(1) + x(2) + x(3) * six + return +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 new file mode 100644 index 0000000000000000000000000000000000000000..34756a390028e801d78945bb94d74f220a8b43d6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 @@ -0,0 +1,22 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_long(x) + implicit none + integer, parameter :: ii = selected_int_kind(18) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bcaa03bd4f7233eec8a21b0fb9a41a949ecc1938 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correctly intercepted. +! Specifically that types of constants without +! compound kind specs are correctly inferred +! adapted Gibbs iteration code from pymc +!
for this test case +subroutine foo_non_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + + integer(ii) maxiterates + parameter (maxiterates=2) + + integer(ii) maxseries + parameter (maxseries=2) + + integer(ii) wasize + parameter (wasize=maxiterates*maxseries) + integer(ii), intent(inout) :: x + dimension x(wasize) + + x(1) = x(1) + x(2) + x(3) + x(4) * wasize + return +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 new file mode 100644 index 0000000000000000000000000000000000000000..c4d25bbbd7a2953f2a9d30f905f868645d5bdb84 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_single(x) + implicit none + integer, parameter :: rp = selected_real_kind(6) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_double(x) + implicit none + integer, parameter :: rp = selected_real_kind(15) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/quoted_character/foo.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/quoted_character/foo.f new file mode 100644 index 0000000000000000000000000000000000000000..bd2e8eb149ff0b15494d5d42516648256ba4bca9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/quoted_character/foo.f @@ -0,0 +1,14 @@ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/AB.inc b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/AB.inc new file mode 100644 index 0000000000000000000000000000000000000000..712b0c24fd048e7e98407c36c6f255a03dedeb57 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/AB.inc @@ -0,0 +1 @@ +real(8) b, n, m diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f77comments.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 0000000000000000000000000000000000000000..901dedadb2c6e679c5490567d146b21413c9d869 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,26 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 !
Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 0000000000000000000000000000000000000000..2cf1d00c1dde0bf51385608b7b29662f6a6556a0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 0000000000000000000000000000000000000000..06912719dbeea9e870a1a6362adf83047162f911 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! more comments + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + OUTPUT1 = INPUT1 + & + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 +END SUBROUTINE TESTSUB diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/incfile.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/incfile.f90 new file mode 100644 index 0000000000000000000000000000000000000000..3caef77b67e8cf2e78d269b53a4e5bedbfe92ac3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/incfile.f90 @@ -0,0 +1,5 @@ +function add(n,m) result(b) + implicit none + include 'AB.inc' + b = n + m +end function add diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 0000000000000000000000000000000000000000..430258a3cfc01c73fd2993435681639d9df684f7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. 
+ subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_character/foo77.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_character/foo77.f new file mode 100644 index 0000000000000000000000000000000000000000..7b025c1ac9cadb5f010df86a574dfd9b5671e913 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_character/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 new file mode 100644 index 0000000000000000000000000000000000000000..09a50ccd069365eb502ae141055ab96293e12a0e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_complex/foo77.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_complex/foo77.f new file mode 100644 index 0000000000000000000000000000000000000000..22e11efc0371ffb2f2b08c76c3ad55b7004be3c5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_complex/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py 
intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 new file mode 100644 index 0000000000000000000000000000000000000000..34ab31f3af93a7195e5ffd4404d2fd1168aed282 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_integer/foo77.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_integer/foo77.f new file mode 100644 index 0000000000000000000000000000000000000000..b910f261a31f4c6af6f40b4f1069d5f951d47d71 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_integer/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 new file mode 100644 index 0000000000000000000000000000000000000000..e5da9ec19feef90a38bb2fa364cbfebe37fcf912 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + 
function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_logical/foo77.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_logical/foo77.f new file mode 100644 index 0000000000000000000000000000000000000000..a886ec6f409c12d59110e39561ce78c920c9c37d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_logical/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 new file mode 100644 index 0000000000000000000000000000000000000000..12e2fcf5b28def4db59d8ddcb79723cac2ee4e24 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = 
value + end subroutine s8 +end module f90_return_logical diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_real/foo77.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_real/foo77.f new file mode 100644 index 0000000000000000000000000000000000000000..66201632eb02c732cad0043a6880b4f4ebd4878c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_real/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 new file mode 100644 index 0000000000000000000000000000000000000000..54a61f849b25572afe68064cffc04688c80f6962 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/size/foo.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 0000000000000000000000000000000000000000..2ad165877748ed6084daa804d9d57ee011c8f55a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff 
--git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/char.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 0000000000000000000000000000000000000000..242bbef28f21b3fa2a4b340364df6b22a0d647c6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. + INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8c8e5a3e5ed8dea480b1be257b647c12da0ed2ca --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 @@ -0,0 +1,34 @@ +function sint(s) result(i) + implicit none + character(len=*) :: s + integer :: j, i + i = 0 + do j=len(s), 1, -1 + if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then + i = i + ichar(s(j:j)) * 10 ** (j - 1) + endif + end do + return + end function sint + + function test_in_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4) :: a + integer :: i + i = sint(a) + a(1:1) = 'A' + return + end function test_in_bytes4 + + function test_inout_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4), intent(inout) :: a + integer :: i + if (a(1:1).ne.' 
') then + a(1:1) = 'E' + endif + i = sint(a) + return + end function test_inout_bytes4 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh24008.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh24008.f new file mode 100644 index 0000000000000000000000000000000000000000..63afd46530848ba5cc5e7d30987e786234caf590 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh24008.f @@ -0,0 +1,8 @@ + SUBROUTINE GREET(NAME, GREETING) + CHARACTER NAME*(*), GREETING*(*) + CHARACTER*(50) MESSAGE + + MESSAGE = 'Hello, ' // NAME // ', ' // GREETING +c$$$ PRINT *, MESSAGE + + END SUBROUTINE GREET diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh24662.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh24662.f90 new file mode 100644 index 0000000000000000000000000000000000000000..5840eba39bf37014646ca25add39ab1e486e8802 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh24662.f90 @@ -0,0 +1,7 @@ +subroutine string_inout_optional(output) + implicit none + character*(32), optional, intent(inout) :: output + if (present(output)) then + output="output string" + endif +end subroutine diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286.f90 new file mode 100644 index 0000000000000000000000000000000000000000..d2a3b056fae3f04f6daf71512af11d43dd848b35 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286.f90 @@ -0,0 +1,14 @@ +subroutine charint(trans, info) + character, intent(in) :: trans + integer, intent(out) :: info + if (trans == 'N') then + info = 1 + else if (trans == 'T') then + info = 2 + else if (trans == 'C') then + info = 3 + else + info = -1 + end if + +end subroutine charint diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286.pyf b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286.pyf new file mode 100644 index 0000000000000000000000000000000000000000..40c8b62fdd4fde0a80e6cfbff9e6282167f6b341 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(trans=='N'||trans=='T'||trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf new file mode 100644 index 0000000000000000000000000000000000000000..e49ce2c9cfe3030a5ca83481b5bb980c847a5950 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(*trans=='N'||*trans=='T'||*trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 new file mode 100644 index 0000000000000000000000000000000000000000..a9fd8e4afb1451474d561c0b40add20cdcac51b0 --- /dev/null +++ 
b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 @@ -0,0 +1,9 @@ +MODULE string_test + + character(len=8) :: string + character string77 * 8 + + character(len=12), dimension(5,7) :: strarr + character strarr77(5,7) * 12 + +END MODULE string_test diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/string.f b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/string.f new file mode 100644 index 0000000000000000000000000000000000000000..f5fb3c8293d7598cc6be8f14d714fd102fa1711a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/string/string.f @@ -0,0 +1,12 @@ +C FILE: STRING.F + SUBROUTINE FOO(A,B,C,D) + CHARACTER*5 A, B + CHARACTER*(*) C,D +Cf2py intent(in) a,c +Cf2py intent(inout) b,d + A(1:1) = 'A' + B(1:1) = 'B' + C(1:1) = 'C' + D(1:1) = 'D' + END +C END OF FILE STRING.F diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 b/phivenv/Lib/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 new file mode 100644 index 0000000000000000000000000000000000000000..d8dd1beff4d2d2a07b4955afde4c5b4e27e193d9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 @@ -0,0 +1,9 @@ +module fortfuncs + implicit none +contains + subroutine square(x,y) + integer, intent(in), value :: x + integer, intent(out) :: y + y = x*x + end subroutine square +end module fortfuncs diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_abstract_interface.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..4948568a43172f5d04d2a7062809d3822d079460 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,26 @@ +from pathlib import Path +import pytest +import textwrap +from . import util +from numpy.f2py import crackfortran +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +class TestAbstractInterface(util.F2PyTest): + sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")] + + skip = ["add1", "add2"] + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self): + # Test gh18403 + fpath = util.getpath("tests", "src", "abstract_interface", + "gh18403_mod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert len(mod[0]["body"]) == 1 + assert mod[0]["body"][0]["block"] == "abstract interface" diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 0000000000000000000000000000000000000000..d10f1d59df3f1e8cfbc4dd264efd26c0b32c2f84 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,682 @@ +import os +import sys +import copy +import platform +import pytest +from pathlib import Path + +import numpy as np + +from numpy.testing import assert_, assert_equal +from numpy._core._type_aliases import c_names_dict as _c_names_dict +from . 
import util + +wrap = None + +# Extend core typeinfo with CHARACTER to test dtype('c') +c_names_dict = dict( + CHARACTER=np.dtype("c"), + **_c_names_dict +) + + +def get_testdir(): + testroot = Path(__file__).resolve().parent / "src" + return testroot / "array_from_pyobj" + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + if wrap is None: + src = [ + get_testdir() / "wrapmodule.c", + ] + wrap = util.build_meson(src, module_name = "test_array_from_pyobj_ext") + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in [ + "CONTIGUOUS", + "FORTRAN", + "OWNDATA", + "ENSURECOPY", + "ENSUREARRAY", + "ALIGNED", + "NOTSWAPPED", + "WRITEABLE", + "WRITEBACKIFCOPY", + "UPDATEIFCOPY", + "BEHAVED", + "BEHAVED_RO", + "CARRAY", + "FARRAY", + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent: + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == "optional": + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == "in_": + name = "in" + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return "intent(%s)" % (",".join(self.intent_list)) + + def __repr__(self): + return "Intent(%r)" % (self.intent_list) + + def is_intent(self, *names): + for name in names: + if name not in self.intent_list: + return False + return True + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + + +intent = Intent() + +_type_names = [ + "BOOL", + "BYTE", + "UBYTE", + "SHORT", + "USHORT", + "INT", + "UINT", + "LONG", + "ULONG", + "LONGLONG", + "ULONGLONG", + "FLOAT", + "DOUBLE", + "CFLOAT", + "STRING1", + "STRING5", + "CHARACTER", +] + +_cast_dict = {"BOOL": ["BOOL"]} +_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] +_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] +_cast_dict["BYTE"] = ["BYTE"] +_cast_dict["UBYTE"] = ["UBYTE"] +_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] +_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] +_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] +_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] + +_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] +_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] + +_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] +_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] + +_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] +_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] + +_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] + +_cast_dict['STRING1'] = ['STRING1'] +_cast_dict['STRING5'] = ['STRING5'] +_cast_dict['CHARACTER'] = ['CHARACTER'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types; this means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or false +# when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE.
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) + and sys.platform != "win32" + and (platform.system(), platform.processor()) != ("Darwin", "arm")): + _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) + _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ + "ULONG", + "FLOAT", + "DOUBLE", + "LONGDOUBLE", + ] + _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ + "CFLOAT", + "CDOUBLE", + "CLONGDOUBLE", + ] + _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] + + +class Type: + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, np.dtype): + dtype0 = name + name = None + for n, i in c_names_dict.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + + if self.NAME == 'CHARACTER': + info = c_names_dict[self.NAME] + self.type_num = getattr(wrap, 'NPY_STRING') + self.elsize = 1 + self.dtype = np.dtype('c') + elif self.NAME.startswith('STRING'): + info = c_names_dict[self.NAME[:6]] + self.type_num = getattr(wrap, 'NPY_STRING') + self.elsize = int(self.NAME[6:] or 0) + self.dtype = np.dtype(f'S{self.elsize}') + else: + info = c_names_dict[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + self.elsize = info.itemsize + self.dtype = np.dtype(info.type) + + assert self.type_num == info.num + self.type = info.type + self.dtypechar = info.char + + def __repr__(self): + return (f"Type({self.NAME})|type_num={self.type_num}," + f" dtype={self.dtype}," + f" type={self.type}, elsize={self.elsize}," + f" dtypechar={self.dtypechar}") + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if c_names_dict[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array: + + def __repr__(self): + return (f'Array({self.type}, {self.dims}, {self.intent},' + f' {self.obj})|arr={self.arr}') + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, + typ.elsize, + dims, intent.flags, obj) + + assert isinstance(self.arr, np.ndarray) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert (intent.flags & wrap.F2PY_INTENT_C) + assert not self.arr.flags["FORTRAN"] + assert self.arr.flags["CONTIGUOUS"] + assert (not self.arr_attr[6] & wrap.FORTRAN) + else: + assert (not intent.flags & wrap.F2PY_INTENT_C) + assert self.arr.flags["FORTRAN"] + assert not self.arr.flags["CONTIGUOUS"] + assert (self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + 
self.pyarr_attr = None + return + + if intent.is_intent("cache"): + assert isinstance(obj, np.ndarray), repr(type(obj)) + self.pyarr = np.array(obj).reshape(*dims).copy() + else: + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent("c") and "C" or "F", + ) + assert self.pyarr.dtype == typ + self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) + assert self.pyarr.flags["OWNDATA"], (obj, intent) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert not self.pyarr.flags["FORTRAN"] + assert self.pyarr.flags["CONTIGUOUS"] + assert (not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert self.pyarr.flags["FORTRAN"] + assert not self.pyarr.flags["CONTIGUOUS"] + assert (self.pyarr_attr[6] & wrap.FORTRAN) + + assert self.arr_attr[1] == self.pyarr_attr[1] # nd + assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions + if self.arr_attr[1] <= 1: + assert self.arr_attr[3] == self.pyarr_attr[3], repr(( + self.arr_attr[3], + self.pyarr_attr[3], + self.arr.tobytes(), + self.pyarr.tobytes(), + )) # strides + assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( + self.arr_attr[5], self.pyarr_attr[5] + )) # descr + assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + self.arr_attr[6], + self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), + intent, + )) # flags + + if intent.is_intent("cache"): + assert self.arr_attr[5][3] >= self.type.elsize + else: + assert self.arr_attr[5][3] == self.type.elsize + assert (self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, np.ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent("copy") and self.arr_attr[1] <= 1: + assert self.has_shared_memory() + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + return (arr1 == arr2).all() + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array.""" + if self.obj is self.arr: + return True + if not isinstance(self.obj, np.ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent: + def test_in_out(self): + assert str(intent.in_.out) == "intent(in,out)" + assert intent.in_.c.is_intent("c") + assert not intent.in_.c.is_intent_exact("c") + assert intent.in_.c.is_intent_exact("c", "in") + assert intent.in_.c.is_intent_exact("in", "c") + assert not intent.in_.is_intent("c") + + +class TestSharedMemory: + + @pytest.fixture(autouse=True, scope="class", params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: Array( + Type(request.param), dims, intent, obj) + + @property + def num2seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return ['1' * elsize, '2' * elsize] + return [1, 2] + + @property + def num23seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return [['1' * elsize, '2' * elsize, '3' * elsize], + ['4' * elsize, '5' * elsize, '6' * elsize]] + return [[1, 2, 3], [4, 5, 6]] + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert not a.has_shared_memory() + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == 
self.type.elsize: + assert a.has_shared_memory(), repr((self.type.dtype, t.dtype)) + else: + assert not a.has_shared_memory() + + @pytest.mark.parametrize("write", ["w", "ro"]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("inp", ["2seq", "23seq"]) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies""" + seq = getattr(self, "num" + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, + ((order == 'C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + + def test_inout_2seq(self): + obj = np.array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert a.has_shared_memory() + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout|inplace|cache) array"): + raise + else: + raise SystemError("intent(inout) should have failed on sequence") + + def test_f_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert a.has_shared_memory() + + obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout) array"): + raise + else: + raise SystemError( + "intent(inout) should have failed on improper array") + + def test_c_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert a.has_shared_memory() + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert not a.has_shared_memory() + + def test_c_in_from_23seq(self): + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, + self.num23seq) + assert not a.has_shared_memory() + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + assert not a.has_shared_memory() + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, + obj) + assert not a.has_shared_memory() + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + 
a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, + obj) + assert not a.has_shared_memory() + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory() + + obj = np.array(self.num2seq, dtype=t.dtype, order="F") + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory(), repr(t.dtype) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on multisegmented array") + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.NAME == 'STRING': + # string elsize is 0, so skipping the test + continue + if t.elsize >= self.type.elsize: + continue + is_int = np.issubdtype(t.dtype, np.integer) + if is_int and int(self.num2seq[0]) > np.iinfo(t.dtype).max: + # skip test if num2seq would trigger an overflow error + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + try: + self.array(shape, intent.in_.cache, obj) # expected to fail: cache buffer is too small + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on smaller array") + + def test_cache_hidden(self): + shape = (2, ) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on undefined dimensions") + + def test_hidden(self): + shape = (2, ) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(hide) should have failed on undefined dimensions") + + def test_optional_none(self): + shape = (2, ) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not
a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj), ) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + a = self.array(shape, intent.optional.c, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_inplace(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes are changed inplace! + assert not obj.flags["CONTIGUOUS"] + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = np.array(self.num23seq, dtype=t.dtype) + assert obj.dtype.type == t.type + assert obj.dtype.type is not self.type.type + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, + dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes changed inplace! + assert not obj.flags["CONTIGUOUS"] + assert obj.dtype.type is self.type.type # obj changed inplace! diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_assumed_shape.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..7076892fd97b09cba4a640f8b65d240d3245ed2d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,49 @@ +import os +import pytest +import tempfile + +from . 
import util + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), + util.getpath("tests", "src", "assumed_shape", "precision.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), + util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert r == 3 + r = self.module.sum([1, 2]) + assert r == 3 + r = self.module.sum_with_use([1, 2]) + assert r == 3 + + r = self.module.mod.sum([1, 2]) + assert r == 3 + r = self.module.mod.fsum([1, 2]) + assert r == 3 + + +class TestF2cmapOption(TestAssumedShapeSumExample): + def setup_method(self): + # Use a custom file name for .f2py_f2cmap + self.sources = list(self.sources) + f2cmap_src = self.sources.pop(-1) + + self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) + with open(f2cmap_src, "rb") as f: + self.f2cmap_file.write(f.read()) + self.f2cmap_file.close() + + self.sources.append(self.f2cmap_file.name) + self.options = ["--f2cmap", self.f2cmap_file.name] + + super().setup_method() + + def teardown_method(self): + os.unlink(self.f2cmap_file.name) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_block_docstring.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..55a12d7ad909231f27df6dfceef10e955ef9061b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,18 @@ +import sys +import pytest +from . import util + +from numpy.testing import IS_PYPY + + +@pytest.mark.slow +class TestBlockDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "block_docstring", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_block_docstring(self): + expected = "bar : 'i'-array(2,3)\n" + assert self.module.block.__doc__ == expected diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_callback.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_callback.py new file mode 100644 index 0000000000000000000000000000000000000000..7761a1c3d010488b9f0ba6464f2a945b85619d3b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_callback.py @@ -0,0 +1,246 @@ +import math +import textwrap +import sys +import pytest +import threading +import traceback +import time + +import numpy as np +from numpy.testing import IS_PYPY +from . import util + + +class TestF77Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "foo.f")] + + @pytest.mark.parametrize("name", "t,t2".split(",")) + @pytest.mark.slow + def test_all(self, name): + self.check_function(name) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. 
+ + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """) + assert self.module.t.__doc__ == expected + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert r == 4 + r = t(lambda a: 5, fun_extra_args=(6, )) + assert r == 5 + r = t(lambda a: a, fun_extra_args=(6, )) + assert r == 6 + r = t(lambda a: 5 + a, fun_extra_args=(7, )) + assert r == 12 + r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, )) + assert r == 180 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + + r = t(self.module.func, fun_extra_args=(6, )) + assert r == 17 + r = t(self.module.func0) + assert r == 11 + r = t(self.module.func0._cpointer) + assert r == 11 + + class A: + def __call__(self): + return 7 + + def mth(self): + return 9 + + a = A() + r = t(a) + assert r == 7 + r = t(a.mth) + assert r == 9 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + def callback(code): + if code == "r": + return 0 + else: + return 1 + + f = getattr(self.module, "string_callback") + r = f(callback) + assert r == 0 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu1 = np.zeros((1, ), "S8") + cu2 = np.zeros((1, 8), "c") + cu3 = np.array([""], "S8") + + def callback(cu, lencu): + if cu.shape != (lencu,): + return 1 + if cu.dtype != "S8": + return 2 + if not np.all(cu == b""): + return 3 + return 0 + + f = getattr(self.module, "string_callback_array") + for cu in [cu1, cu2, cu3]: + res = f(callback, cu, cu.size) + assert res == 0 + + def test_threadsafety(self): + # Segfaults if the callback handling is not threadsafe + + errors = [] + + def cb(): + # Sleep here to make it more likely for another thread + # to call their callback at the same time. 
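+ # (time.sleep releases the GIL, so the other worker threads get a chance to run their callbacks concurrently)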
+ time.sleep(1e-3) + + # Check reentrancy + r = self.module.t(lambda: 123) + assert r == 123 + + return 42 + + def runner(name): + try: + for j in range(50): + r = self.module.t(cb) + assert r == 42 + self.check_function(name) + except Exception: + errors.append(traceback.format_exc()) + + threads = [ + threading.Thread(target=runner, args=(arg, )) + for arg in ("t", "t2") for n in range(20) + ] + + for t in threads: + t.start() + + for t in threads: + t.join() + + errors = "\n\n".join(errors) + if errors: + raise AssertionError(errors) + + def test_hidden_callback(self): + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + try: + self.module.hidden_callback2(2) + except Exception as msg: + assert str(msg).startswith("cb: Callback global_f not defined") + + self.module.global_f = lambda x: x + 1 + r = self.module.hidden_callback(2) + assert r == 3 + + self.module.global_f = lambda x: x + 2 + r = self.module.hidden_callback(2) + assert r == 4 + + del self.module.global_f + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + self.module.global_f = lambda x=0: x + 3 + r = self.module.hidden_callback(2) + assert r == 5 + + # reproducer of gh18341 + r = self.module.hidden_callback2(2) + assert r == 3 + + +class TestF77CallbackPythonTLS(TestF77Callback): + """ + Callback tests using Python thread-local storage instead of + compiler-provided thread-local storage + """ + + options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh17797.f90")] + + @pytest.mark.slow + def test_gh17797(self): + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 + + +class TestGH18335(util.F2PyTest): + """Reproducing the reported issue requires specific input; extending + this class could break the conditions that trigger the issue, so the + reproducer is implemented as a separate test class. Do not extend this + class with other tests!
+ """ + sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] + + @pytest.mark.slow + def test_gh18335(self): + def foo(x): + x[0] += 1 + + r = self.module.gh18335(foo) + assert r == 123 + 1 + + +class TestGH25211(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh25211.f"), + util.getpath("tests", "src", "callback", "gh25211.pyf")] + module_name = "callback2" + + def test_gh25211(self): + def bar(x): + return x*x + + res = self.module.foo(bar) + assert res == 110 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_character.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_character.py new file mode 100644 index 0000000000000000000000000000000000000000..81a66e151b336703218bdad175aa5019e914f6f2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_character.py @@ -0,0 +1,639 @@ +import pytest +import textwrap +from numpy.testing import assert_array_equal, assert_equal, assert_raises +import numpy as np +from numpy.f2py.tests import util + + +@pytest.mark.slow +class TestCharacterString(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character_string' + length_list = ['1', '3', 'star'] + + code = '' + for length in length_list: + fsuffix = length + clength = dict(star='(*)').get(length, length) + + code += textwrap.dedent(f""" + + subroutine {fprefix}_input_{fsuffix}(c, o, n) + character*{clength}, intent(in) :: c + integer n + !f2py integer, depend(c), intent(hide) :: n = slen(c) + integer*1, dimension(n) :: o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input_{fsuffix} + + subroutine {fprefix}_output_{fsuffix}(c, o, n) + character*{clength}, intent(out) :: c + integer n + integer*1, dimension(n), intent(in) :: o + !f2py integer, depend(o), intent(hide) :: n = len(o) + c = transfer(o, c) + end subroutine {fprefix}_output_{fsuffix} + + subroutine {fprefix}_array_input_{fsuffix}(c, o, m, n) + integer m, i, n + character*{clength}, intent(in), dimension(m) :: c + !f2py integer, depend(c), intent(hide) :: m = len(c) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m, n), intent(out) :: o + do i=1,m + o(i, :) = transfer(c(i), o(i, :)) + end do + end subroutine {fprefix}_array_input_{fsuffix} + + subroutine {fprefix}_array_output_{fsuffix}(c, o, m, n) + character*{clength}, intent(out), dimension(m) :: c + integer n + integer*1, dimension(m, n), intent(in) :: o + !f2py character(f2py_len=n) :: c + !f2py integer, depend(o), intent(hide) :: m = len(o) + !f2py integer, depend(o), intent(hide) :: n = shape(o, 1) + do i=1,m + c(i) = transfer(o(i, :), c(i)) + end do + end subroutine {fprefix}_array_output_{fsuffix} + + subroutine {fprefix}_2d_array_input_{fsuffix}(c, o, m1, m2, n) + integer m1, m2, i, j, n + character*{clength}, intent(in), dimension(m1, m2) :: c + !f2py integer, depend(c), intent(hide) :: m1 = len(c) + !f2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m1, m2, n), intent(out) :: o + do i=1,m1 + do j=1,m2 + o(i, j, :) = transfer(c(i, j), o(i, j, :)) + end do + end do + end subroutine {fprefix}_2d_array_input_{fsuffix} + """) + + @pytest.mark.parametrize("length", length_list) + def test_input(self, length): + fsuffix = {'(*)': 'star'}.get(length, length) + f = getattr(self.module, self.fprefix + '_input_' + fsuffix) + + a = {'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length] + + 
assert_array_equal(f(a), np.array(list(map(ord, a)), dtype='u1')) + + @pytest.mark.parametrize("length", length_list[:-1]) + def test_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_output_' + fsuffix) + + a = {'1': 'a', '3': 'abc'}[length] + + assert_array_equal(f(np.array(list(map(ord, a)), dtype='u1')), + a.encode()) + + @pytest.mark.parametrize("length", length_list) + def test_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_input_' + fsuffix) + + a = np.array([{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], + ], dtype='S') + + expected = np.array([[c for c in s] for s in a], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_array_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_output_' + fsuffix) + + expected = np.array( + [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') + + a = np.array([[c for c in s] for s in expected], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_2d_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_2d_array_input_' + fsuffix) + + a = np.array([[{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], + [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], + {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], + dtype='S') + expected = np.array([[[c for c in item] for item in row] for row in a], + dtype='u1', order='F') + assert_array_equal(f(a), expected) + + +class TestCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_input(c, o) + character, intent(in) :: c + integer*1 o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input + + subroutine {fprefix}_output(c, o) + character :: c + integer*1, intent(in) :: o + !f2py intent(out) c + c = transfer(o, c) + end subroutine {fprefix}_output + + subroutine {fprefix}_input_output(c, o) + character, intent(in) :: c + character o + !f2py intent(out) o + o = c + end subroutine {fprefix}_input_output + + subroutine {fprefix}_inout(c, n) + character :: c, n + !f2py intent(in) n + !f2py intent(inout) c + c = n + end subroutine {fprefix}_inout + + function {fprefix}_return(o) result (c) + character :: c + character, intent(in) :: o + c = transfer(o, c) + end function {fprefix}_return + + subroutine {fprefix}_array_input(c, o) + character, intent(in) :: c(3) + integer*1 o(3) + !f2py intent(out) o + integer i + do i=1,3 + o(i) = transfer(c(i), o(i)) + end do + end subroutine {fprefix}_array_input + + subroutine {fprefix}_2d_array_input(c, o) + character, intent(in) :: c(2, 3) + integer*1 o(2, 3) + !f2py intent(out) o + integer i, j + do i=1,2 + do j=1,3 + o(i, j) = transfer(c(i, j), o(i, j)) + end do + end do + end subroutine {fprefix}_2d_array_input + + subroutine {fprefix}_array_output(c, o) + character :: c(3) + integer*1, intent(in) :: o(3) + !f2py intent(out) c + do i=1,3 + c(i) = transfer(o(i), c(i)) + end do + end subroutine {fprefix}_array_output + + subroutine {fprefix}_array_inout(c, n) + character :: c(3), n(3) + !f2py intent(in) n(3) + !f2py intent(inout) c(3) + do i=1,3 + c(i) = 
n(i) + end do + end subroutine {fprefix}_array_inout + + subroutine {fprefix}_2d_array_inout(c, n) + character :: c(2, 3), n(2, 3) + !f2py intent(in) n(2, 3) + !f2py intent(inout) c(2, 3) + integer i, j + do i=1,2 + do j=1,3 + c(i, j) = n(i, j) + end do + end do + end subroutine {fprefix}_2d_array_inout + + function {fprefix}_array_return(o) result (c) + character, dimension(3) :: c + character, intent(in) :: o(3) + do i=1,3 + c(i) = o(i) + end do + end function {fprefix}_array_return + + function {fprefix}_optional(o) result (c) + character, intent(in) :: o + !f2py character o = "a" + character :: c + c = o + end function {fprefix}_optional + """) + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_input(self, dtype): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f(np.array('a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(b'a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(['a'], dtype=dtype)), ord('a')) + assert_equal(f(np.array('abc', dtype=dtype)), ord('a')) + assert_equal(f(np.array([['a']], dtype=dtype)), ord('a')) + + def test_input_varia(self): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f('a'), ord('a')) + assert_equal(f(b'a'), ord(b'a')) + assert_equal(f(''), 0) + assert_equal(f(b''), 0) + assert_equal(f(b'\0'), 0) + assert_equal(f('ab'), ord('a')) + assert_equal(f(b'ab'), ord('a')) + assert_equal(f(['a']), ord('a')) + + assert_equal(f(np.array(b'a')), ord('a')) + assert_equal(f(np.array([b'a'])), ord('a')) + a = np.array('a') + assert_equal(f(a), ord('a')) + a = np.array(['a']) + assert_equal(f(a), ord('a')) + + try: + f([]) + except IndexError as msg: + if not str(msg).endswith(' got 0-list'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on empty list') + + try: + f(97) + except TypeError as msg: + if not str(msg).endswith(' got int instance'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on int value') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_array_input') + + assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + + def test_array_input_varia(self): + f = getattr(self.module, self.fprefix + '_array_input') + assert_array_equal(f(['a', 'b', 'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f([b'a', b'b', b'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + + try: + f(['a', 'b', 'c', 'd']) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_2d_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_input') + + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], dtype=dtype, order='F') + expected = a.view(np.uint32 if dtype == 'U1' else np.uint8) + assert_array_equal(f(a), expected) + + def test_output(self): + f = getattr(self.module, self.fprefix + '_output') + + assert_equal(f(ord(b'a')), b'a') + assert_equal(f(0), b'\0') + + def test_array_output(self): + f = getattr(self.module, self.fprefix + '_array_output') + + assert_array_equal(f(list(map(ord, 'abc'))), + np.array(list('abc'), dtype='S1')) + + def test_input_output(self): + f =
getattr(self.module, self.fprefix + '_input_output') + + assert_equal(f(b'a'), b'a') + assert_equal(f('a'), b'a') + assert_equal(f(''), b'\0') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_inout') + + a = np.array(list('abc'), dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype)) + f(a[1:], 'B') + assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype)) + + a = np.array(['abc'], dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + def test_inout_varia(self): + f = getattr(self.module, self.fprefix + '_inout') + a = np.array('abc', dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array('Abc', dtype=a.dtype)) + + a = np.array(['abc'], dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + try: + f('abc', 'A') + except ValueError as msg: + if not str(msg).endswith(' got 3-str'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on str value') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_array_inout') + n = np.array(['A', 'B', 'C'], dtype=dtype, order='F') + + a = np.array(['a', 'b', 'c'], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype) + f(a[1:], n) + assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype)) + + a = np.array([['a', 'b', 'c']], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype)) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F') + try: + f(a, n) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_2d_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_inout') + n = np.array([['A', 'B', 'C'], + ['D', 'E', 'F']], + dtype=dtype, order='F') + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], + dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + def test_return(self): + f = getattr(self.module, self.fprefix + '_return') + + assert_equal(f('a'), b'a') + + @pytest.mark.skip('fortran function returning array segfaults') + def test_array_return(self): + f = getattr(self.module, self.fprefix + '_array_return') + + a = np.array(list('abc'), dtype='S1') + assert_array_equal(f(a), a) + + def test_optional(self): + f = getattr(self.module, self.fprefix + '_optional') + + assert_equal(f(), b"a") + assert_equal(f(b'B'), b"B") + + +class TestMiscCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_misc_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_gh18684(x, y, m) + character(len=5), dimension(m), intent(in) :: x + character*5, dimension(m), intent(out) :: y + integer i, m + !f2py integer, intent(hide), depend(x) :: m = f2py_len(x) + do i=1,m + y(i) = x(i) + end do + end subroutine {fprefix}_gh18684 + + subroutine {fprefix}_gh6308(x, i) + integer i + !f2py check(i>=0 && i<12) i + character*5 name, x + common name(12) + name(i + 1) = x + end subroutine {fprefix}_gh6308 + + subroutine {fprefix}_gh4519(x) + character(len=*), intent(in) :: x(:) + !f2py intent(out) x + integer :: i + ! Uncomment for debug printing: + !do i=1, size(x) + ! 
print*, "x(",i,")=", x(i) + !end do + end subroutine {fprefix}_gh4519 + + pure function {fprefix}_gh3425(x) result (y) + character(len=*), intent(in) :: x + character(len=len(x)) :: y + integer :: i + do i = 1, len(x) + j = iachar(x(i:i)) + if (j>=iachar("a") .and. j<=iachar("z") ) then + y(i:i) = achar(j-32) + else + y(i:i) = x(i:i) + endif + end do + end function {fprefix}_gh3425 + + subroutine {fprefix}_character_bc_new(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x + !f2py character, dimension((x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(x == 'a' || x == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(&x, &y, z) + !f2py callprotoargument character*, character*, character* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_new + + subroutine {fprefix}_character_bc_old(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x[0] + !f2py character, dimension((*x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(*x == 'a' || x[0] == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(x, y, z) + !f2py callprotoargument char*, char*, char* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_old + """) + + @pytest.mark.slow + def test_gh18684(self): + # Test character(len=5) and character*5 usages + f = getattr(self.module, self.fprefix + '_gh18684') + x = np.array(["abcde", "fghij"], dtype='S5') + y = f(x) + + assert_array_equal(x, y) + + def test_gh6308(self): + # Test character string array in a common block + f = getattr(self.module, self.fprefix + '_gh6308') + + assert_equal(self.module._BLNK_.name.dtype, np.dtype('S5')) + assert_equal(len(self.module._BLNK_.name), 12) + f("abcde", 0) + assert_equal(self.module._BLNK_.name[0], b"abcde") + f("12345", 5) + assert_equal(self.module._BLNK_.name[5], b"12345") + + def test_gh4519(self): + # Test array of assumed length strings + f = getattr(self.module, self.fprefix + '_gh4519') + + for x, expected in [ + ('a', dict(shape=(), dtype=np.dtype('S1'))), + ('text', dict(shape=(), dtype=np.dtype('S4'))), + (np.array(['1', '2', '3'], dtype='S1'), + dict(shape=(3,), dtype=np.dtype('S1'))), + (['1', '2', '34'], + dict(shape=(3,), dtype=np.dtype('S2'))), + (['', ''], dict(shape=(2,), dtype=np.dtype('S1')))]: + r = f(x) + for k, v in expected.items(): + assert_equal(getattr(r, k), v) + + def test_gh3425(self): + # Test returning a copy of assumed length string + f = getattr(self.module, self.fprefix + '_gh3425') + # f is equivalent to bytes.upper + + assert_equal(f('abC'), b'ABC') + assert_equal(f(''), b'') + assert_equal(f('abC12d'), b'ABC12D') + + @pytest.mark.parametrize("state", ['new', 'old']) + def test_character_bc(self, state): + f = getattr(self.module, self.fprefix + '_character_bc_' + state) + + c, a = f() + assert_equal(c, b'a') + assert_equal(len(a), 1) + + c, a = f(b'b') + assert_equal(c, b'b') + assert_equal(len(a), 2) + + assert_raises(Exception, lambda: f(b'c')) + + +class TestStringScalarArr(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "scalar_string.f90")] + + def test_char(self): + for out in (self.module.string_test.string, + self.module.string_test.string77): + expected = () + assert out.shape == expected + expected = '|S8' + assert out.dtype == expected + + def 
test_char_arr(self): + for out in (self.module.string_test.strarr, + self.module.string_test.strarr77): + expected = (5,7) + assert out.shape == expected + expected = '|S12' + assert out.dtype == expected + +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + def test_gh24008(self): + self.module.greet("joe", "bob") + +@pytest.mark.slow +class TestStringOptionalInOut(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24662.f90")] + + def test_gh24662(self): + self.module.string_inout_optional() + a = np.array('hi', dtype='S32') + self.module.string_inout_optional(a) + assert "output string" in a.tobytes().decode() + with pytest.raises(Exception): + aa = "Hi" + self.module.string_inout_optional(aa) + + +@pytest.mark.slow +class TestNewCharHandling(util.F2PyTest): + # from v1.24 onwards, gh-19388 + sources = [ + util.getpath("tests", "src", "string", "gh25286.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 + +@pytest.mark.slow +class TestBCCharHandling(util.F2PyTest): + # SciPy style, "incorrect" bindings with a hook + sources = [ + util.getpath("tests", "src", "string", "gh25286_bc.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_common.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..af5430c704629186c3d656b686e38d2eb3e90e93 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_common.py @@ -0,0 +1,20 @@ +import pytest +import numpy as np +from . import util + +@pytest.mark.slow +class TestCommonBlock(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "block.f")] + + def test_common_block(self): + self.module.initcb() + assert self.module.block.long_bn == np.array(1.0, dtype=np.float64) + assert self.module.block.string_bn == np.array("2", dtype="|S1") + assert self.module.block.ok == np.array(3, dtype=np.int32) + + +class TestCommonWithUse(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "gh19161.f90")] + + def test_common_gh19161(self): + assert self.module.data.x == 0 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_crackfortran.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_crackfortran.py new file mode 100644 index 0000000000000000000000000000000000000000..06db97943e2e6e70d55fda14a6e8d1c34c9ae9ac --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_crackfortran.py @@ -0,0 +1,407 @@ +import importlib +import codecs +import time +import unicodedata +import pytest +import numpy as np +from numpy.f2py.crackfortran import markinnerspaces, nameargspattern +from . 
import util +from numpy.f2py import crackfortran +import textwrap +import contextlib +import io + + +class TestNoSpace(util.F2PyTest): + # issue gh-15035: add handling for endsubroutine, endfunction with no space + # between "end" and the block name + sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")] + + def test_module(self): + k = np.array([1, 2, 3], dtype=np.float64) + w = np.array([1, 2, 3], dtype=np.float64) + self.module.subb(k) + assert np.allclose(k, w + 1) + self.module.subc([w, k]) + assert np.allclose(k, w + 1) + assert self.module.t0("23") == b"2" + + +class TestPublicPrivate: + def test_defaultPrivate(self): + fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" in mod["vars"]["b"]["attrspec"] + assert "public" not in mod["vars"]["b"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_defaultPublic(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_access_type(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + tt = mod[0]['vars'] + assert set(tt['a']['attrspec']) == {'private', 'bind(c)'} + assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} + assert set(tt['c']['attrspec']) == {'public'} + + def test_nowrap_private_proceedures(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + pyf = crackfortran.crack2fortran(mod) + assert 'bar' not in pyf + +class TestModuleProcedure(): + def test_moduleOperators(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "body" in mod and len(mod["body"]) == 9 + assert mod["body"][1]["name"] == "operator(.item.)" + assert "implementedby" in mod["body"][1] + assert mod["body"][1]["implementedby"] == \ + ["item_int", "item_real"] + assert mod["body"][2]["name"] == "operator(==)" + assert "implementedby" in mod["body"][2] + assert mod["body"][2]["implementedby"] == ["items_are_equal"] + assert mod["body"][3]["name"] == "assignment(=)" + assert "implementedby" in mod["body"][3] + assert mod["body"][3]["implementedby"] == \ + ["get_int", "get_real"] + + def test_notPublicPrivate(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert mod['vars']['a']['attrspec'] == ['private', ] + assert mod['vars']['b']['attrspec'] == ['public', ] + assert mod['vars']['seta']['attrspec'] == ['public', ] + + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")] + + def test_external_as_statement(self): + def 
incr(x): + return x + 123 + + r = self.module.external_as_statement(incr) + assert r == 123 + + def test_external_as_attribute(self): + def incr(x): + return x + 123 + + r = self.module.external_as_attribute(incr) + assert r == 123 + + +class TestCrackFortran(util.F2PyTest): + # gh-2848: commented lines between parameters in subroutine parameter lists + sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")] + + def test_gh2848(self): + r = self.module.gh2848(1, 2) + assert r == (1, 2) + + +class TestMarkinnerspaces: + # gh-14118: markinnerspaces does not handle multiple quotations + + def test_do_not_touch_normal_spaces(self): + test_list = ["a ", " a", "a b c", "'abcdefghij'"] + for i in test_list: + assert markinnerspaces(i) == i + + def test_one_relevant_space(self): + assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'" + assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"' + + def test_ignore_inner_quotes(self): + assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e" + assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e" + + def test_multiple_relevant_spaces(self): + assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'" + assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"' + + +class TestDimSpec(util.F2PyTest): + """This test suite exercises various expressions that are used as dimension + specifications. + + There exist two use cases where analyzing dimension + specifications is important. + + In the first case, the size of output arrays must be defined based + on the inputs to a Fortran function. Because Fortran supports + arbitrary bases for indexing, for instance, `arr(lower:upper)`, + f2py has to evaluate an expression `upper - lower + 1` where + `lower` and `upper` are arbitrary expressions of input parameters. + The evaluation is performed in C, so f2py has to translate Fortran + expressions to valid C expressions (an alternative approach is + that a developer specifies the corresponding C expressions in a + .pyf file). + + In the second case, the user provides an input array with a given + size, but some hidden parameters used in the dimension + specifications need to be determined from that size. This is a + harder problem because f2py has to solve the inverse problem: find + a parameter `p` such that `upper(p) - lower(p) + 1` equals the + size of the input array. When this equation cannot be + solved (e.g. because the input array size is wrong), f2py raises an + error before calling the Fortran function (which otherwise would + likely crash the Python process). f2py currently supports this case + only when the equation is linear with respect to the unknown + parameter. + + """ + + suffix = ".f90" + + code_template = textwrap.dedent(""" + function get_arr_size_{count}(a, n) result (length) + integer, intent(in) :: n + integer, dimension({dimspec}), intent(out) :: a + integer length + length = size(a) + end function + + subroutine get_inv_arr_size_{count}(a, n) + integer :: n + ! the value of n is computed in the f2py wrapper + !f2py intent(out) n + integer, dimension({dimspec}), intent(in) :: a + if (a({first}).gt.0) then + !
print*, "a=", a + endif + end subroutine + """) + + linear_dimspecs = [ + "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)", + "2*n, n" + ] + nonlinear_dimspecs = ["2*n:3*n*n+2*n"] + all_dimspecs = linear_dimspecs + nonlinear_dimspecs + + code = "" + for count, dimspec in enumerate(all_dimspecs): + lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')] + code += code_template.format( + count=count, + dimspec=dimspec, + first=", ".join(lst), + ) + + @pytest.mark.parametrize("dimspec", all_dimspecs) + @pytest.mark.slow + def test_array_size(self, dimspec): + + count = self.all_dimspecs.index(dimspec) + get_arr_size = getattr(self.module, f"get_arr_size_{count}") + + for n in [1, 2, 3, 4, 5]: + sz, a = get_arr_size(n) + assert a.size == sz + + @pytest.mark.parametrize("dimspec", all_dimspecs) + def test_inv_array_size(self, dimspec): + + count = self.all_dimspecs.index(dimspec) + get_arr_size = getattr(self.module, f"get_arr_size_{count}") + get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}") + + for n in [1, 2, 3, 4, 5]: + sz, a = get_arr_size(n) + if dimspec in self.nonlinear_dimspecs: + # one must specify n as input, the call we'll ensure + # that a and n are compatible: + n1 = get_inv_arr_size(a, n) + else: + # in case of linear dependence, n can be determined + # from the shape of a: + n1 = get_inv_arr_size(a) + # n1 may be different from n (for instance, when `a` size + # is a function of some `n` fraction) but it must produce + # the same sized array + sz1, _ = get_arr_size(n1) + assert sz == sz1, (n, n1, sz, sz1) + + +class TestModuleDeclaration: + def test_dependencies(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert mod[0]["vars"]["abar"]["="] == "bar('abar')" + + +class TestEval(util.F2PyTest): + def test_eval_scalar(self): + eval_scalar = crackfortran._eval_scalar + + assert eval_scalar('123', {}) == '123' + assert eval_scalar('12 + 3', {}) == '15' + assert eval_scalar('a + b', dict(a=1, b=2)) == '3' + assert eval_scalar('"123"', {}) == "'123'" + + +class TestFortranReader(util.F2PyTest): + @pytest.mark.parametrize("encoding", + ['ascii', 'utf-8', 'utf-16', 'utf-32']) + def test_input_encoding(self, tmp_path, encoding): + # gh-635 + f_path = tmp_path / f"input_with_{encoding}_encoding.f90" + with f_path.open('w', encoding=encoding) as ff: + ff.write(""" + subroutine foo() + end subroutine foo + """) + mod = crackfortran.crackfortran([str(f_path)]) + assert mod[0]['name'] == 'foo' + + +@pytest.mark.slow +class TestUnicodeComment(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")] + + @pytest.mark.skipif( + (importlib.util.find_spec("charset_normalizer") is None), + reason="test requires charset_normalizer which is not installed", + ) + def test_encoding_comment(self): + self.module.foo(3) + + +class TestNameArgsPatternBacktracking: + @pytest.mark.parametrize( + ['adversary'], + [ + ('@)@bind@(@',), + ('@)@bind @(@',), + ('@)@bind foo bar baz@(@',) + ] + ) + def test_nameargspattern_backtracking(self, adversary): + '''address ReDOS vulnerability: + https://github.com/numpy/numpy/issues/23338''' + trials_per_batch = 12 + batches_per_regex = 4 + start_reps, end_reps = 15, 25 + for ii in range(start_reps, end_reps): + repeated_adversary = adversary * ii + # test times in small batches. 
+ # this gives us more chances to catch a bad regex + # while still catching it before too long if it is bad + for _ in range(batches_per_regex): + times = [] + for _ in range(trials_per_batch): + t0 = time.perf_counter() + mtch = nameargspattern.search(repeated_adversary) + times.append(time.perf_counter() - t0) + # our pattern should be much faster than 0.2s per search + # it's unlikely that a bad regex will pass even on fast CPUs + assert np.median(times) < 0.2 + assert not mtch + # if the adversary is capped with @)@, it becomes acceptable + # according to the old version of the regex. + # that should still be true. + good_version_of_adversary = repeated_adversary + '@)@' + assert nameargspattern.search(good_version_of_adversary) + +class TestFunctionReturn(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh23598.f90")] + + @pytest.mark.slow + def test_function_rettype(self): + # gh-23598 + assert self.module.intproduct(3, 4) == 12 + + +class TestFortranGroupCounters(util.F2PyTest): + def test_end_if_comment(self): + # gh-23533 + fpath = util.getpath("tests", "src", "crackfortran", "gh23533.f") + try: + crackfortran.crackfortran([str(fpath)]) + except Exception as exc: + assert False, f"'crackfortran.crackfortran' raised an exception {exc}" + + +class TestF77CommonBlockReader(): + def test_gh22648(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") + with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: + mod = crackfortran.crackfortran([str(fpath)]) + assert "Mismatch" not in stdout_f2py.getvalue() + +class TestParamEval(): + # issue gh-11612, array parameter parsing + def test_param_eval_nested(self): + v = '(/3.14, 4./)' + g_params = dict(kind=crackfortran._kind_func, + selected_int_kind=crackfortran._selected_int_kind_func, + selected_real_kind=crackfortran._selected_real_kind_func) + params = {'dp': 8, 'intparamarray': {1: 3, 2: 5}, + 'nested': {1: 1, 2: 2, 3: 3}} + dimspec = '(2)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {1: 3.14, 2: 4.0} + + def test_param_eval_nonstandard_range(self): + v = '(/ 6, 3, 1 /)' + g_params = dict(kind=crackfortran._kind_func, + selected_int_kind=crackfortran._selected_int_kind_func, + selected_real_kind=crackfortran._selected_real_kind_func) + params = {} + dimspec = '(-1:1)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {-1: 6, 0: 3, 1: 1} + + def test_param_eval_empty_range(self): + v = '6' + g_params = dict(kind=crackfortran._kind_func, + selected_int_kind=crackfortran._selected_int_kind_func, + selected_real_kind=crackfortran._selected_real_kind_func) + params = {} + dimspec = '' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + dimspec=dimspec) + + def test_param_eval_non_array_param(self): + v = '3.14_dp' + g_params = dict(kind=crackfortran._kind_func, + selected_int_kind=crackfortran._selected_int_kind_func, + selected_real_kind=crackfortran._selected_real_kind_func) + params = {} + ret = crackfortran.param_eval(v, g_params, params, dimspec=None) + assert ret == '3.14_dp' + + def test_param_eval_too_many_dims(self): + v = 'reshape((/ (i, i=1, 250) /), (/5, 10, 5/))' + g_params = dict(kind=crackfortran._kind_func, + selected_int_kind=crackfortran._selected_int_kind_func, + selected_real_kind=crackfortran._selected_real_kind_func) + params = {} + dimspec = '(0:4, 3:12, 5)' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + dimspec=dimspec) 
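The TestDimSpec suite above exercises the inverse problem described in its docstring: recovering a hidden parameter n from an input array's size when the dimension length is linear (affine) in n. The following minimal Python sketch illustrates that linear inverse solve under the same assumption; the helper name solve_linear_dimspec is hypothetical, and this is not f2py's actual C implementation:

def solve_linear_dimspec(size_of_a, dim_length):
    # dim_length(n) is assumed affine in n; recover n with dim_length(n) == size_of_a.
    # Evaluate at two points to get the slope and intercept of the line.
    f0, f1 = dim_length(0), dim_length(1)
    slope = f1 - f0
    if slope == 0:
        raise ValueError("dimension does not depend on n")
    n, rem = divmod(size_of_a - f0, slope)
    if rem:
        raise ValueError("input array size is incompatible with the dimension spec")
    return n

# The dimspec "2:n" has length n - 2 + 1 = n - 1, so an array of size 4 implies n = 5.
assert solve_linear_dimspec(4, lambda n: n - 1) == 5

For the nonlinear dimspec "2*n:3*n*n+2*n" no such closed-form inverse exists in general, which is why test_inv_array_size passes n explicitly in that case.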
diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_data.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..edc6410c48864ea9a38565a13b64c8b90fe927c3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_data.py @@ -0,0 +1,71 @@ +import os +import pytest +import numpy as np + +from . import util +from numpy.f2py.crackfortran import crackfortran + + +class TestData(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_stmts.f90")] + + # For gh-23276 + @pytest.mark.slow + def test_data_stmts(self): + assert self.module.cmplxdat.i == 2 + assert self.module.cmplxdat.j == 3 + assert self.module.cmplxdat.x == 1.5 + assert self.module.cmplxdat.y == 2.0 + assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 + assert self.module.cmplxdat.medium_ref_index == np.array(1.+0.j) + assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1.+2.j, -3.+4.j])) + assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) + assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) + assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) + + def test_crackedlines(self): + mod = crackfortran(self.sources) + assert mod[0]['vars']['x']['='] == '1.5' + assert mod[0]['vars']['y']['='] == '2.0' + assert mod[0]['vars']['pi']['='] == '3.1415926535897932384626433832795028841971693993751058209749445923078164062d0' + assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' + assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' + assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' + assert mod[0]['vars']['my_array']['='] == '(/(1.0d0, 2.0d0), (-3.0d0, 4.0d0)/)' + assert mod[0]['vars']['z']['='] == '(/3.5, 7.0/)' + +class TestDataF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_common.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.mydata == 0 + + def test_crackedlines(self): + mod = crackfortran(str(self.sources[0])) + print(mod[0]['vars']) + assert mod[0]['vars']['mydata']['='] == '0' + + +class TestDataMultiplierF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_multiplier.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.ivar1 == 3 + assert self.module.mycom.ivar2 == 3 + assert self.module.mycom.ivar3 == 2 + assert self.module.mycom.ivar4 == 2 + assert self.module.mycom.evar5 == 0 + + +class TestDataWithCommentsF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_with_comments.f")] + + # For gh-23276 + def test_data_stmts(self): + assert len(self.module.mycom.mytab) == 3 + assert self.module.mycom.mytab[0] == 0 + assert self.module.mycom.mytab[1] == 4 + assert self.module.mycom.mytab[2] == 0 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_docs.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..7de52dbe37be77385206ba6591cd3c6aa17987b1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_docs.py @@ -0,0 +1,59 @@ +import pytest +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from . 
import util +from pathlib import Path + +def get_docdir(): + parents = Path(__file__).resolve().parents + try: + # Assumes that spin is used to run tests + nproot = parents[8] + except IndexError: + docdir = None + else: + docdir = nproot / "doc" / "source" / "f2py" / "code" + if docdir and docdir.is_dir(): + return docdir + # Assumes that an editable install is used to run tests + return parents[3] / "doc" / "source" / "f2py" / "code" + +pytestmark = pytest.mark.skipif( + not get_docdir().is_dir(), + reason=f"Could not find f2py documentation sources" + f"({get_docdir()} does not exist)", +) + +def _path(*args): + return get_docdir().joinpath(*args) + +@pytest.mark.slow +class TestDocAdvanced(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py'] + sources = [_path('asterisk1.f90'), _path('asterisk2.f90'), + _path('ftype.f')] + + def test_asterisk1(self): + foo = getattr(self.module, 'foo1') + assert_equal(foo(), b'123456789A12') + + def test_asterisk2(self): + foo = getattr(self.module, 'foo2') + assert_equal(foo(2), b'12') + assert_equal(foo(12), b'123456789A12') + assert_equal(foo(20), b'123456789A123456789B') + + def test_ftype(self): + ftype = self.module + ftype.foo() + assert_equal(ftype.data.a, 0) + ftype.data.a = 3 + ftype.data.x = [1, 2, 3] + assert_equal(ftype.data.a, 3) + assert_array_equal(ftype.data.x, + np.array([1, 2, 3], dtype=np.float32)) + ftype.data.x[1] = 45 + assert_array_equal(ftype.data.x, + np.array([1, 45, 3], dtype=np.float32)) + + # TODO: implement test methods for other example Fortran codes diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_f2cmap.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_f2cmap.py new file mode 100644 index 0000000000000000000000000000000000000000..4faa46507e329c24be270b205e951d45f1b54b9c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_f2cmap.py @@ -0,0 +1,15 @@ +from . import util +import numpy as np + +class TestF2Cmap(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), + util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap") + ] + + # gh-15095 + def test_gh15095(self): + inp = np.ones(3) + out = self.module.func1(inp) + exp_out = 3 + assert out == exp_out diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_f2py2e.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_f2py2e.py new file mode 100644 index 0000000000000000000000000000000000000000..977fb5bc0ccb8fda238458af7d4356c844d52aea --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_f2py2e.py @@ -0,0 +1,896 @@ +import textwrap, re, sys, subprocess, shlex +from pathlib import Path +from collections import namedtuple +import platform + +import pytest + +from . import util +from numpy.f2py.f2py2e import main as f2pycli + +######################### +# CLI utils and classes # +######################### + +PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") + + +def get_io_paths(fname_inp, mname="untitled"): + """Takes in a temporary file for testing and returns the expected output and input paths + + Here expected output is essentially one of any of the possible generated + files. 
+ + ..note:: + + Since this does not actually run f2py, none of these are guaranteed to + exist, and module names are typically incorrect + + Parameters + ---------- + fname_inp : str + The input filename + mname : str, optional + The name of the module, untitled by default + + Returns + ------- + genp : NamedTuple PPaths + The possible paths which are generated, not all of which exist + """ + bpath = Path(fname_inp) + return PPaths( + finp=bpath.with_suffix(".f"), + f90inp=bpath.with_suffix(".f90"), + pyf=bpath.with_suffix(".pyf"), + wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"), + wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"), + cmodf=bpath.with_name(f"{mname}module.c"), + ) + + +############## +# CLI Fixtures and Tests # +############# + + +@pytest.fixture(scope="session") +def hello_world_f90(tmpdir_factory): + """Generates a single f90 file for testing""" + fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text() + fn = tmpdir_factory.getbasetemp() / "hello.f90" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def gh23598_warn(tmpdir_factory): + """F90 file for testing warnings in gh23598""" + fdat = util.getpath("tests", "src", "crackfortran", "gh23598Warn.f90").read_text() + fn = tmpdir_factory.getbasetemp() / "gh23598Warn.f90" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def gh22819_cli(tmpdir_factory): + """F90 file for testing disallowed CLI arguments in ghff819""" + fdat = util.getpath("tests", "src", "cli", "gh_22819.pyf").read_text() + fn = tmpdir_factory.getbasetemp() / "gh_22819.pyf" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def hello_world_f77(tmpdir_factory): + """Generates a single f77 file for testing""" + fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text() + fn = tmpdir_factory.getbasetemp() / "hello.f" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def retreal_f77(tmpdir_factory): + """Generates a single f77 file for testing""" + fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text() + fn = tmpdir_factory.getbasetemp() / "foo.f" + fn.write_text(fdat, encoding="ascii") + return fn + +@pytest.fixture(scope="session") +def f2cmap_f90(tmpdir_factory): + """Generates a single f90 file for testing""" + fdat = util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90").read_text() + f2cmap = util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap").read_text() + fn = tmpdir_factory.getbasetemp() / "f2cmap.f90" + fmap = tmpdir_factory.getbasetemp() / "mapfile" + fn.write_text(fdat, encoding="ascii") + fmap.write_text(f2cmap, encoding="ascii") + return fn + + +def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): + """Check that module names are handled correctly + gh-22819 + Essentially, the -m name cannot be used to import the module, so the module + named in the .pyf needs to be used instead + + CLI :: -m and a .pyf file + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath}".split()) + with util.switchdir(ipath.parent): + f2pycli() + gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()] + assert "blahmodule.c" not in gen_paths # shouldn't be generated + assert "blah-f2pywrappers.f" not in gen_paths + assert "test_22819-f2pywrappers.f" in gen_paths + assert "test_22819module.c" in gen_paths + assert "Ignoring blah" + + +def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch): + 
"""Only one .pyf file allowed + gh-22819 + CLI :: .pyf files + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath} hello.pyf".split()) + with util.switchdir(ipath.parent): + with pytest.raises(ValueError, match="Only one .pyf file per call"): + f2pycli() + + +def test_gh23598_warn(capfd, gh23598_warn, monkeypatch): + foutl = get_io_paths(gh23598_warn, mname="test") + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate files + wrapper = foutl.wrap90.read_text() + assert "intproductf2pywrap, intpr" not in wrapper + + +def test_gen_pyf(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file is generated via the CLI + CLI :: -h + """ + ipath = Path(hello_world_f90) + opath = Path(hello_world_f90).stem + ".pyf" + monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate wrappers + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert Path(f'{opath}').exists() + + +def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file can be dumped to stdout + CLI :: -h + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert "function hi() ! in " in out + + +def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that the CLI refuses to overwrite signature files + CLI :: -h without --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + with pytest.raises(SystemExit): + f2pycli() # Refuse to overwrite + _, err = capfd.readouterr() + assert "Use --overwrite-signature to overwrite" in err + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), + reason='Compiler and 3.12 required') +def test_untitled_cli(capfd, hello_world_f90, monkeypatch): + """Check that modules are named correctly + + CLI :: defaults + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "untitledmodule.c" in out + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') +def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): + """Check that no distutils imports are performed on 3.12 + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( + sys, "argv", f"f2py --help-link".split() + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Use --dep for meson builds" in out + MNAME = "hi2" # Needs to be different for a new -c + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() 
+ ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Cannot use distutils backend with Python>=3.12" in out + + +@pytest.mark.xfail +def test_f2py_skip(capfd, retreal_f77, monkeypatch): + """Tests that functions can be skipped + CLI :: skip: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + remaining = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test skip: {toskip}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in remaining.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_f2py_only(capfd, retreal_f77, monkeypatch): + """Test that functions can be kept by only: + CLI :: only: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + tokeep = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test only: {tokeep}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_file_processing_switch(capfd, hello_world_f90, retreal_f77, + monkeypatch): + """Tests that it is possible to return to file processing mode + CLI :: : + BUG: numpy-gh #20520 + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + ipath2 = Path(hello_world_f90) + tokeep = "td s0 hi" # hi is in ipath2 + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split( + ), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' 
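+                # (The "only:"/"skip:" name list runs until the bare ":" in
+                #  argv, which switches f2py back to file-processing mode;
+                #  that separator is what the gh-20520 regression exercises.)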
+                in err)
+        for rkey in tokeep.split():
+            assert f'Constructing wrapper function "{rkey}"' in out
+
+
+def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
+    """Checks the generation of files based on a module name
+    CLI :: -m
+    """
+    MNAME = "hi"
+    foutl = get_io_paths(hello_world_f90, mname=MNAME)
+    ipath = foutl.f90inp
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+
+    # Always generate C module
+    assert Path.exists(foutl.cmodf)
+    # File contains a function, check for F77 wrappers
+    assert Path.exists(foutl.wrap77)
+
+
+def test_mod_gen_gh25263(capfd, hello_world_f77, monkeypatch):
+    """Check that pyf files are correctly generated with module structure
+    CLI :: -m <name> -h pyf_file
+    BUG: numpy-gh #20520
+    """
+    MNAME = "hi"
+    foutl = get_io_paths(hello_world_f77, mname=MNAME)
+    ipath = foutl.finp
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME} -h hi.pyf'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path('hi.pyf').open() as hipyf:
+            pyfdat = hipyf.read()
+            assert "python module hi" in pyfdat
+
+
+def test_lower_cmod(capfd, hello_world_f77, monkeypatch):
+    """Lowers cases by flag or when -h is present
+
+    CLI :: --[no-]lower
+    """
+    foutl = get_io_paths(hello_world_f77, mname="test")
+    ipath = foutl.finp
+    capshi = re.compile(r"HI\(\)")
+    capslo = re.compile(r"hi\(\)")
+    # Case I: --lower is passed
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is not None
+        assert capshi.search(out) is None
+    # Case II: --no-lower is passed
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py {ipath} -m test --no-lower'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is None
+        assert capshi.search(out) is not None
+
+
+def test_lower_sig(capfd, hello_world_f77, monkeypatch):
+    """Lowers cases in signature files by flag or when -h is present
+
+    CLI :: --[no-]lower -h
+    """
+    foutl = get_io_paths(hello_world_f77, mname="test")
+    ipath = foutl.finp
+    # Signature files
+    capshi = re.compile(r"Block: HI")
+    capslo = re.compile(r"Block: hi")
+    # Case I: --lower is implied by -h
+    # TODO: Clean up to prevent passing --overwrite-signature
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is not None
+        assert capshi.search(out) is None
+
+    # Case II: --no-lower overrides -h
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower'
+        .split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is None
+        assert capshi.search(out) is not None
+
+
+def test_build_dir(capfd, hello_world_f90, monkeypatch):
+    """Ensures that the build directory can be specified
+
+    CLI :: --build-dir
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    odir = "tttmp"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --build-dir {odir}'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert f"Wrote C/API module \"{mname}\"" in out
+
+
+def test_overwrite(capfd, hello_world_f90, monkeypatch):
+    """Ensures that an existing signature file can be overwritten
+
+    CLI :: --overwrite-signature
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(
+        sys, "argv",
+        f'f2py -h faker.pyf {ipath} --overwrite-signature'.split())
+
+    with util.switchdir(ipath.parent):
+        Path("faker.pyf").write_text("Fake news", encoding="ascii")
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Saving signatures to file" in out
+
+
+def test_latexdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that TeX documentation is written out
+
+    CLI :: --latex-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --latex-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Documentation is saved to file" in out
+        with Path(f"{mname}module.tex").open() as otex:
+            assert "\\documentclass" in otex.read()
+
+
+def test_nolatexdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that TeX documentation is not written out
+
+    CLI :: --no-latex-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --no-latex-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Documentation is saved to file" not in out
+
+
+def test_shortlatex(capfd, hello_world_f90, monkeypatch):
+    """Ensures that truncated documentation is written out
+
+    TODO: Test to ensure this has no effect without --latex-doc
+    CLI :: --latex-doc --short-latex
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Documentation is saved to file" in out
+        with Path(f"./{mname}module.tex").open() as otex:
+            assert "\\documentclass" not in otex.read()
+
+
+def test_restdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that ReST documentation is written out
+
+    CLI :: --rest-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --rest-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "ReST Documentation is saved to file" in out
+        with Path(f"./{mname}module.rest").open() as orst:
+            assert r".. -*- rest -*-" in orst.read()
+
+
+def test_norestexdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that ReST documentation is not written out
+
+    CLI :: --no-rest-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --no-rest-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "ReST Documentation is saved to file" not in out
+
+
+def test_debugcapi(capfd, hello_world_f90, monkeypatch):
+    """Ensures that debugging wrappers are written
+
+    CLI :: --debug-capi
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --debug-capi'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path(f"./{mname}module.c").open() as ocmod:
+            assert r"#define DEBUGCFUNCS" in ocmod.read()
+
+
+@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.")
+def test_debugcapi_bld(hello_world_f90, monkeypatch):
+    """Ensures that debugging wrappers work
+
+    CLI :: --debug-capi -c
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} -c --debug-capi'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"")
+        rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
+        eout = ' Hello World\n'
+        eerr = textwrap.dedent("""\
+debug-capi:Python C/API function blah.hi()
+debug-capi:float hi=:output,hidden,scalar
+debug-capi:hi=0
+debug-capi:Fortran subroutine `f2pywraphi(&hi)'
+debug-capi:hi=0
+debug-capi:Building return value.
+debug-capi:Python C/API function blah.hi: successful.
+debug-capi:Freeing memory.
+        """)
+        assert rout.stdout == eout
+        assert rout.stderr == eerr
+
+
+def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch):
+    """Ensures that Fortran subroutine wrappers for F77 are included by default
+
+    CLI :: --[no]-wrap-functions
+    """
+    # Implied
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert r"Fortran 77 wrappers are saved to" in out
+
+    # Explicit
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --wrap-functions'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert r"Fortran 77 wrappers are saved to" in out
+
+
+def test_nowrapfunc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that Fortran subroutine wrappers for F77 can be disabled
+
+    CLI :: --no-wrap-functions
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --no-wrap-functions'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert r"Fortran 77 wrappers are saved to" not in out
+
+
+def test_inclheader(capfd, hello_world_f90, monkeypatch):
+    """Add to the include directories
+
+    CLI :: -include
+    TODO: Document this in the help string
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py -m {mname} {ipath} -include<stdbool.h> -include<stdio.h> '.
+        split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path(f"./{mname}module.c").open() as ocmod:
+            ocmr = ocmod.read()
+            assert "#include <stdbool.h>" in ocmr
+            assert "#include <stdio.h>" in ocmr
+
+
+def test_inclpath():
+    """Add to the include directories
+
+    CLI :: --include-paths
+    """
+    # TODO: populate
+    pass
+
+
+def test_hlink():
+    """Check the link resources that f2py reports
+
+    CLI :: --help-link
+    """
+    # TODO: populate
+    pass
+
+
+def test_f2cmap(capfd, f2cmap_f90, monkeypatch):
+    """Check that Fortran-to-Python KIND specs can be passed
+
+    CLI :: --f2cmap
+    """
+    ipath = Path(f2cmap_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --f2cmap mapfile'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Reading f2cmap from 'mapfile' ..." in out
+        assert "Mapping \"real(kind=real32)\" to \"float\"" in out
+        assert "Mapping \"real(kind=real64)\" to \"double\"" in out
+        assert "Mapping \"integer(kind=int64)\" to \"long_long\"" in out
+        assert "Successfully applied user defined f2cmap changes" in out
+
+
+def test_quiet(capfd, hello_world_f90, monkeypatch):
+    """Reduce verbosity
+
+    CLI :: --quiet
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert len(out) == 0
+
+
+def test_verbose(capfd, hello_world_f90, monkeypatch):
+    """Increase verbosity
+
+    CLI :: --verbose
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "analyzeline" in out
+
+
+def test_version(capfd, monkeypatch):
+    """Ensure version
+
+    CLI :: -v
+    """
+    monkeypatch.setattr(sys, "argv", 'f2py -v'.split())
+    # TODO: f2py2e should not call sys.exit() after printing the version
+    with pytest.raises(SystemExit):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        import numpy as np
+        assert np.__version__ == out.strip()
+
+
+@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.")
+def test_npdistop(hello_world_f90, monkeypatch):
+    """
+    CLI :: -c
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        cmd_run = shlex.split("python -c \"import blah; blah.hi()\"")
+        rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
+        eout = ' Hello World\n'
+        assert rout.stdout == eout
+
+
+# Numpy distutils flags
+# TODO: These should be tested separately
+
+
+def test_npd_fcompiler():
+    """
+    CLI :: -c --fcompiler
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_compiler():
+    """
+    CLI :: -c --compiler
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_help_fcompiler():
+    """
+    CLI :: -c --help-fcompiler
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f77exec():
+    """
+    CLI :: -c --f77exec
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f90exec():
+    """
+    CLI :: -c --f90exec
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f77flags():
+    """
+    CLI :: -c --f77flags
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f90flags():
+    """
+    CLI :: -c --f90flags
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_opt():
+    """
+    CLI :: -c --opt
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_arch():
+    """
+    CLI :: -c --arch
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_noopt():
+    """
+    CLI :: -c --noopt
+    """
+    # TODO: populate
+    pass
+
+
+def
test_npd_noarch(): + """ + CLI :: -c --noarch + """ + # TODO: populate + pass + + +def test_npd_debug(): + """ + CLI :: -c --debug + """ + # TODO: populate + pass + + +def test_npd_link_auto(): + """ + CLI :: -c --link- + """ + # TODO: populate + pass + + +def test_npd_lib(): + """ + CLI :: -c -L/path/to/lib/ -l + """ + # TODO: populate + pass + + +def test_npd_define(): + """ + CLI :: -D + """ + # TODO: populate + pass + + +def test_npd_undefine(): + """ + CLI :: -U + """ + # TODO: populate + pass + + +def test_npd_incl(): + """ + CLI :: -I/path/to/include/ + """ + # TODO: populate + pass + + +def test_npd_linker(): + """ + CLI :: .o .so .a + """ + # TODO: populate + pass diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_isoc.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_isoc.py new file mode 100644 index 0000000000000000000000000000000000000000..356d6f2efe22ee027d03101bb66c052f6aadb69c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_isoc.py @@ -0,0 +1,53 @@ +from . import util +import numpy as np +import pytest +from numpy.testing import assert_allclose + +class TestISOC(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), + ] + + # gh-24553 + @pytest.mark.slow + def test_c_double(self): + out = self.module.coddity.c_add(1, 2) + exp_out = 3 + assert out == exp_out + + # gh-9693 + def test_bindc_function(self): + out = self.module.coddity.wat(1, 20) + exp_out = 8 + assert out == exp_out + + # gh-25207 + def test_bindc_kinds(self): + out = self.module.coddity.c_add_int64(1, 20) + exp_out = 21 + assert out == exp_out + + # gh-25207 + def test_bindc_add_arr(self): + a = np.array([1,2,3]) + b = np.array([1,2,3]) + out = self.module.coddity.add_arr(a, b) + exp_out = a*2 + assert_allclose(out, exp_out) + + +def test_process_f2cmap_dict(): + from numpy.f2py.auxfuncs import process_f2cmap_dict + + f2cmap_all = {"integer": {"8": "rubbish_type"}} + new_map = {"INTEGER": {"4": "int"}} + c2py_map = {"int": "int", "rubbish_type": "long"} + + exp_map, exp_maptyp = ({"integer": {"8": "rubbish_type", "4": "int"}}, ["int"]) + + # Call the function + res_map, res_maptyp = process_f2cmap_dict(f2cmap_all, new_map, c2py_map) + + # Assert the result is as expected + assert res_map == exp_map + assert res_maptyp == exp_maptyp diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_kind.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_kind.py new file mode 100644 index 0000000000000000000000000000000000000000..0afc87e2728c5c42070bb26c0af2d95e207c52bc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_kind.py @@ -0,0 +1,50 @@ +import sys +import os +import pytest +import platform + +from numpy.f2py.crackfortran import ( + _selected_int_kind_func as selected_int_kind, + _selected_real_kind_func as selected_real_kind, +) +from . import util + + +class TestKind(util.F2PyTest): + sources = [util.getpath("tests", "src", "kind", "foo.f90")] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, + reason="Fails for 32 bit machines") + def test_int(self): + """Test `int` kind_func for integers up to 10**40.""" + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert selectedintkind(i) == selected_int_kind( + i + ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" + + def test_real(self): + """ + Test (processor-dependent) `real` kind_func for real numbers + of up to 31 digits precision (extended/quadruple). 
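+        (Both sides of the comparison implement the Fortran intrinsic
+        SELECTED_REAL_KIND: the compiled module calls the Fortran one, while
+        crackfortran's `_selected_real_kind_func` is the Python counterpart
+        used when evaluating kind parameters during parsing.)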
+ """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" + + @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + reason="Some PowerPC may not support full IEEE 754 precision") + def test_quad_precision(self): + """ + Test kind_func for quadruple precision [`real(16)`] of 32+ digits . + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32, 40): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_mixed.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_mixed.py new file mode 100644 index 0000000000000000000000000000000000000000..1dedb696597682423e9474690f021b7ac74c54ea --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,34 @@ +import os +import textwrap +import pytest + +from numpy.testing import IS_PYPY +from . import util + + +class TestMixed(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "mixed", "foo.f"), + util.getpath("tests", "src", "mixed", "foo_fixed.f90"), + util.getpath("tests", "src", "mixed", "foo_free.f90"), + ] + + @pytest.mark.slow + def test_all(self): + assert self.module.bar11() == 11 + assert self.module.foo_fixed.bar12() == 12 + assert self.module.foo_free.bar13() == 13 + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """) + assert self.module.bar11.__doc__ == expected diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_modules.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..7a656a479a76e4ceca5b08fad5f78bb7582879de --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_modules.py @@ -0,0 +1,50 @@ +import pytest +import textwrap + +from . 
import util +from numpy.testing import IS_PYPY + + +@pytest.mark.slow +class TestModuleDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] + + @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_module_docstring(self): + assert self.module.mod.__doc__ == textwrap.dedent( + """\ + i : 'i'-scalar + x : 'i'-array(4) + a : 'f'-array(2,3) + b : 'f'-array(-1,-1), not allocated\x00 + foo()\n + Wrapper for ``foo``.\n\n""" + ) + + +@pytest.mark.slow +class TestModuleAndSubroutine(util.F2PyTest): + module_name = "example" + sources = [ + util.getpath("tests", "src", "modules", "gh25337", "data.f90"), + util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"), + ] + + def test_gh25337(self): + self.module.data.set_shift(3) + assert "data" in dir(self.module) + + +@pytest.mark.slow +class TestUsedModule(util.F2PyTest): + module_name = "fmath" + sources = [ + util.getpath("tests", "src", "modules", "use_modules.f90"), + ] + + def test_gh25867(self): + compiled_mods = [x for x in dir(self.module) if "__" not in x] + assert "useops" in compiled_mods + assert self.module.useops.sum_and_double(3, 7) == 20 + assert "mathops" in compiled_mods + assert self.module.mathops.add(3, 7) == 10 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_parameter.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_parameter.py new file mode 100644 index 0000000000000000000000000000000000000000..826a04cb9e1f643be9e8243e739da06dfafc6571 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,131 @@ +import os +import pytest + +import numpy as np + +from . import util + + +class TestParameters(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [ + util.getpath("tests", "src", "parameter", "constant_real.f90"), + util.getpath("tests", "src", "parameter", "constant_integer.f90"), + util.getpath("tests", "src", "parameter", "constant_both.f90"), + util.getpath("tests", "src", "parameter", "constant_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_array.f90"), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) + + @pytest.mark.slow + def 
test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + pytest.raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + def test_constant_array(self): + x = np.arange(3, dtype=np.float64) + y = np.arange(5, dtype=np.float64) + z = self.module.foo_array(x, y) + assert np.allclose(x, [0.0, 1./10, 2./10]) + assert np.allclose(y, [0.0, 1.*10, 2.*10, 3.*10, 4.*10]) + assert np.allclose(z, 19.0) + + def test_constant_array_any_index(self): + x = np.arange(6, dtype=np.float64) + y = self.module.foo_array_any_index(x) + assert np.allclose(y, x.reshape((2, 3), order='F')) + + def test_constant_array_delims(self): + x = self.module.foo_array_delims() + assert x == 9 + diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_pyf_src.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_pyf_src.py new file mode 100644 index 0000000000000000000000000000000000000000..e93543de4244c32f56f4ef29aef00031ace1f84d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_pyf_src.py @@ -0,0 +1,44 @@ +# This test is ported from numpy.distutils +from numpy.f2py._src_pyf import process_str +from numpy.testing import assert_equal + + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. 
+    """
+    return ' '.join(s.split())
+
+
+def test_from_template():
+    """Regression test for gh-10712."""
+    pyf = process_str(pyf_src)
+    normalized_pyf = normalize_whitespace(pyf)
+    normalized_expected_pyf = normalize_whitespace(expected_pyf)
+    assert_equal(normalized_pyf, normalized_expected_pyf)
diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_quoted_character.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_quoted_character.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebbcf0c66c2915e876442b806088f99cbdf17e1c
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_quoted_character.py
@@ -0,0 +1,17 @@
+"""See https://github.com/numpy/numpy/pull/10676.
+
+"""
+import sys
+import pytest
+
+from . import util
+
+
+class TestQuotedCharacter(util.F2PyTest):
+    sources = [util.getpath("tests", "src", "quoted_character", "foo.f")]
+
+    @pytest.mark.skipif(sys.platform == "win32",
+                        reason="Fails with MinGW64 Gfortran (Issue #9673)")
+    @pytest.mark.slow
+    def test_quoted_character(self):
+        assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")")
diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_regression.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_regression.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2cd90136104aa6eddb395c28f82a12f83b19bc
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_regression.py
@@ -0,0 +1,141 @@
+import os
+import pytest
+import platform
+
+import numpy as np
+import numpy.testing as npt
+
+from . import util
+
+
+class TestIntentInOut(util.F2PyTest):
+    # Check that intent(in out) translates as intent(inout)
+    sources = [util.getpath("tests", "src", "regression", "inout.f90")]
+
+    @pytest.mark.slow
+    def test_inout(self):
+        # non-contiguous should raise error
+        x = np.arange(6, dtype=np.float32)[::2]
+        pytest.raises(ValueError, self.module.foo, x)
+
+        # check values with contiguous array
+        x = np.arange(3, dtype=np.float32)
+        self.module.foo(x)
+        assert np.allclose(x, [3, 1, 2])
+
+
+class TestNegativeBounds(util.F2PyTest):
+    # Check that negative bounds work correctly
+    sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]
+
+    @pytest.mark.slow
+    def test_negbound(self):
+        xvec = np.arange(12)
+        xlow = -6
+        xhigh = 4
+        # Calculate the upper bound,
+        # keeping the 1-based indexing in mind
+        def ubound(xl, xh):
+            return xh - xl + 1
+        rval = self.module.foo(is_=xlow, ie_=xhigh,
+                               arr=xvec[:ubound(xlow, xhigh)])
+        expval = np.arange(11, dtype=np.float32)
+        assert np.allclose(rval, expval)
+
+
+class TestNumpyVersionAttribute(util.F2PyTest):
+    # Check that the attribute __f2py_numpy_version__ is present
+    # in the compiled module and that it has the value np.__version__.
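+    # (The version string is baked into the generated C wrapper when f2py
+    #  runs, so it should record the NumPy release that produced the module.)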
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")] + + @pytest.mark.slow + def test_numpy_version_attribute(self): + + # Check that self.module has an attribute named "__f2py_numpy_version__" + assert hasattr(self.module, "__f2py_numpy_version__") + + # Check that the attribute __f2py_numpy_version__ is a string + assert isinstance(self.module.__f2py_numpy_version__, str) + + # Check that __f2py_numpy_version__ has the value numpy.__version__ + assert np.__version__ == self.module.__f2py_numpy_version__ + + +def test_include_path(): + incdir = np.f2py.get_include() + fnames_in_dir = os.listdir(incdir) + for fname in ("fortranobject.c", "fortranobject.h"): + assert fname in fnames_in_dir + + +class TestIncludeFiles(util.F2PyTest): + sources = [util.getpath("tests", "src", "regression", "incfile.f90")] + options = [f"-I{util.getpath('tests', 'src', 'regression')}", + f"--include-paths {util.getpath('tests', 'src', 'regression')}"] + + @pytest.mark.slow + def test_gh25344(self): + exp = 7.0 + res = self.module.add(3.0, 4.0) + assert exp == res + +class TestF77Comments(util.F2PyTest): + # Check that comments are stripped from F77 continuation lines + sources = [util.getpath("tests", "src", "regression", "f77comments.f")] + + @pytest.mark.slow + def test_gh26148(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res=self.module.testsub(x1, x2) + assert(res[0] == 8) + assert(res[1] == 15) + + @pytest.mark.slow + def test_gh26466(self): + # Check that comments after PARAMETER directions are stripped + expected = np.arange(1, 11, dtype=np.float32)*2 + res=self.module.testsub2() + npt.assert_allclose(expected, res) + +class TestF90Contiuation(util.F2PyTest): + # Check that comments are stripped from F90 continuation lines + sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] + + @pytest.mark.slow + def test_gh26148b(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res=self.module.testsub(x1, x2) + assert(res[0] == 8) + assert(res[1] == 15) + +@pytest.mark.slow +def test_gh26623(): + # Including libraries with . should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -Og\"", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_character.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_character.py new file mode 100644 index 0000000000000000000000000000000000000000..bd30d378f1bd370b9fc25a1d38e24bb9b004114e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,46 @@ +import pytest + +from numpy import array +from . 
import util +import platform + +IS_S390X = platform.machine() == "s390x" + + +@pytest.mark.slow +class TestReturnCharacter(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t1", "s0", "s1"]: + assert t("23") == b"2" + r = t("ab") + assert r == b"a" + r = t(array("ab")) + assert r == b"a" + r = t(array(77, "u1")) + assert r == b"M" + elif tname in ["ts", "ss"]: + assert t(23) == b"23" + assert t("123456789abcdef") == b"123456789a" + elif tname in ["t5", "s5"]: + assert t(23) == b"23" + assert t("ab") == b"ab" + assert t("123456789abcdef") == b"12345" + else: + raise NotImplementedError + + +class TestFReturnCharacter(TestReturnCharacter): + sources = [ + util.getpath("tests", "src", "return_character", "foo77.f"), + util.getpath("tests", "src", "return_character", "foo90.f90"), + ] + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_complex.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..15a5e60243fb66169320f415025b18bce585a90c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,66 @@ +import pytest + +from numpy import array +from . import util + + +@pytest.mark.slow +class TestReturnComplex(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t8", "s0", "s8"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234j) - 234.0j) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err + # assert abs(t('234')-234.)<=err + # assert abs(t('234.6')-234.6)<=err + assert abs(t(-234) + 234.0) <= err + assert abs(t([234]) - 234.0) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234]).astype("b")) + 22.0) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "q")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err + assert abs(t(array([234], "D")) - 234.0) <= err + + # pytest.raises(TypeError, t, array([234], 'S1')) + pytest.raises(TypeError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["(inf+0j)", "(Infinity+0j)"] + except OverflowError: + pass + + +class TestFReturnComplex(TestReturnComplex): + sources = [ + util.getpath("tests", "src", "return_complex", "foo77.f"), + util.getpath("tests", "src", "return_complex", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + def test_all_f77(self, name): + 
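+        # (Naming convention in the foo77.f/foo90.f90 sources, as far as the
+        #  checks suggest: t* are COMPLEX-valued functions, s* are subroutines
+        #  returning via an argument, and the suffix encodes the declared
+        #  size, e.g. t8 ~ complex*8, td ~ double complex.)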
self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), + name) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_integer.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 0000000000000000000000000000000000000000..151ec402ff7a6f5d53e8acfed90fc42990469d90 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,54 @@ +import pytest + +from numpy import array +from . import util + + +@pytest.mark.slow +class TestReturnInteger(util.F2PyTest): + def check_function(self, t, tname): + assert t(123) == 123 + assert t(123.6) == 123 + assert t("123") == 123 + assert t(-123) == -123 + assert t([123]) == 123 + assert t((123, )) == 123 + assert t(array(123)) == 123 + assert t(array(123, "b")) == 123 + assert t(array(123, "h")) == 123 + assert t(array(123, "i")) == 123 + assert t(array(123, "l")) == 123 + assert t(array(123, "B")) == 123 + assert t(array(123, "f")) == 123 + assert t(array(123, "d")) == 123 + + # pytest.raises(ValueError, t, array([123],'S3')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + if tname in ["t8", "s8"]: + pytest.raises(OverflowError, t, 100000000000000000000000) + pytest.raises(OverflowError, t, 10000000011111111111111.23) + + +class TestFReturnInteger(TestReturnInteger): + sources = [ + util.getpath("tests", "src", "return_integer", "foo77.f"), + util.getpath("tests", "src", "return_integer", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), + name) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_logical.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a9e97dbd3ecbcf7596470a54d1d65ccf1fe4a1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,64 @@ +import pytest + +from numpy import array +from . 
import util + + +class TestReturnLogical(util.F2PyTest): + def check_function(self, t): + assert t(True) == 1 + assert t(False) == 0 + assert t(0) == 0 + assert t(None) == 0 + assert t(0.0) == 0 + assert t(0j) == 0 + assert t(1j) == 1 + assert t(234) == 1 + assert t(234.6) == 1 + assert t(234.6 + 3j) == 1 + assert t("234") == 1 + assert t("aaa") == 1 + assert t("") == 0 + assert t([]) == 0 + assert t(()) == 0 + assert t({}) == 0 + assert t(t) == 1 + assert t(-234) == 1 + assert t(10**100) == 1 + assert t([234]) == 1 + assert t((234, )) == 1 + assert t(array(234)) == 1 + assert t(array([234])) == 1 + assert t(array([[234]])) == 1 + assert t(array([127], "b")) == 1 + assert t(array([234], "h")) == 1 + assert t(array([234], "i")) == 1 + assert t(array([234], "l")) == 1 + assert t(array([234], "f")) == 1 + assert t(array([234], "d")) == 1 + assert t(array([234 + 3j], "F")) == 1 + assert t(array([234], "D")) == 1 + assert t(array(0)) == 0 + assert t(array([0])) == 0 + assert t(array([[0]])) == 0 + assert t(array([0j])) == 0 + assert t(array([1])) == 1 + pytest.raises(ValueError, t, array([0, 0])) + + +class TestFReturnLogical(TestReturnLogical): + sources = [ + util.getpath("tests", "src", "return_logical", "foo77.f"), + util.getpath("tests", "src", "return_logical", "foo90.f90"), + ] + + @pytest.mark.slow + @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name)) + + @pytest.mark.slow + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_real.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_real.py new file mode 100644 index 0000000000000000000000000000000000000000..1d1b981161ebdc3850c3ee9f0cbb87693ba7e8e2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,108 @@ +import platform +import pytest +import numpy as np + +from numpy import array +from . 
import util + + +@pytest.mark.slow +class TestReturnReal(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t4", "s0", "s4"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t("234") - 234) <= err + assert abs(t("234.6") - 234.6) <= err + assert abs(t(-234) + 234) <= err + assert abs(t([234]) - 234) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(234).astype("b")) + 22) <= err + assert abs(t(array(234, "h")) - 234.0) <= err + assert abs(t(array(234, "i")) - 234.0) <= err + assert abs(t(array(234, "l")) - 234.0) <= err + assert abs(t(array(234, "B")) - 234.0) <= err + assert abs(t(array(234, "f")) - 234.0) <= err + assert abs(t(array(234, "d")) - 234.0) <= err + if tname in ["t0", "t4", "s0", "s4"]: + assert t(1e200) == t(1e300) # inf + + # pytest.raises(ValueError, t, array([234], 'S1')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["inf", "Infinity"] + except OverflowError: + pass + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, + reason="32-bit builds are buggy" +) +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(",")) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestFReturnReal(TestReturnReal): + sources = [ + util.getpath("tests", "src", "return_real", "foo77.f"), + util.getpath("tests", "src", "return_real", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_semicolon_split.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 0000000000000000000000000000000000000000..4c32c67b76259f50173dccbf45bb2f820d2166c6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,75 @@ +import platform +import pytest +import numpy as np + +from . 
import util + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, + reason="32-bit builds are buggy" +) +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module_name} + """ + + def test_multiline(self): + assert self.module.foo() == 42 + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, + reason="32-bit builds are buggy" +) +@pytest.mark.slow +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module_name} + """ + + def test_callstatement(self): + assert self.module.foo() == 42 diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_size.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_size.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f49494cdd71050244c72d9db0c5bf7e49592e0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_size.py @@ -0,0 +1,45 @@ +import os +import pytest +import numpy as np + +from . import util + + +class TestSizeSumExample(util.F2PyTest): + sources = [util.getpath("tests", "src", "size", "foo.f90")] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert r == [0] + + r = self.module.foo([[1, 2]]) + assert r == [3] + + r = self.module.foo([[1, 2], [3, 4]]) + assert np.allclose(r, [3, 7]) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert np.allclose(r, [3, 7, 11]) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert np.allclose(r.T, np.array([[]])) + + r = self.module.trans([[1, 2]]) + assert np.allclose(r, [[1.], [2.]]) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [[1, 4], [2, 5], [3, 6]]) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert np.allclose(r, []) + + r = self.module.flatten([[1, 2]]) + assert np.allclose(r, [1, 2]) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [1, 2, 3, 4, 5, 6]) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_string.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_string.py new file mode 100644 index 0000000000000000000000000000000000000000..d5edac5dae740e27c8a6d0917de643bef8a32f36 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_string.py @@ -0,0 +1,100 @@ +import os +import pytest +import textwrap +import numpy as np +from . 
import util + + +class TestString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "char.f90")] + + @pytest.mark.slow + def test_char(self): + strings = np.array(["ab", "cd", "ef"], dtype="c").T + inp, out = self.module.char_test.change_strings( + strings, strings.shape[1]) + assert inp == pytest.approx(strings) + expected = strings.copy() + expected[1, :] = "AAA" + assert out == pytest.approx(expected) + + +class TestDocStringArguments(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "string.f")] + + def test_example(self): + a = np.array(b"123\0\0") + b = np.array(b"123\0\0") + c = np.array(b"123") + d = np.array(b"123") + + self.module.foo(a, b, c, d) + + assert a.tobytes() == b"123\0\0" + assert b.tobytes() == b"B23\0\0" + assert c.tobytes() == b"123" + assert d.tobytes() == b"D23" + + +class TestFixedString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "fixed_string.f90")] + + @staticmethod + def _sint(s, start=0, end=None): + """Return the content of a string buffer as integer value. + + For example: + _sint('1234') -> 4321 + _sint('123A') -> 17321 + """ + if isinstance(s, np.ndarray): + s = s.tobytes() + elif isinstance(s, str): + s = s.encode() + assert isinstance(s, bytes) + if end is None: + end = len(s) + i = 0 + for j in range(start, min(end, len(s))): + i += s[j] * 10**j + return i + + def _get_input(self, intent="in"): + if intent in ["in"]: + yield "" + yield "1" + yield "1234" + yield "12345" + yield b"" + yield b"\0" + yield b"1" + yield b"\01" + yield b"1\0" + yield b"1234" + yield b"12345" + yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0') + yield np.array(b"") # array(b'', dtype='|S1') + yield np.array(b"\0") + yield np.array(b"1") + yield np.array(b"1\0") + yield np.array(b"\01") + yield np.array(b"1234") + yield np.array(b"123\0") + yield np.array(b"12345") + + def test_intent_in(self): + for s in self._get_input(): + r = self.module.test_in_bytes4(s) + # also checks that s is not changed inplace + expected = self._sint(s, end=4) + assert r == expected, s + + def test_intent_inout(self): + for s in self._get_input(intent="inout"): + rest = self._sint(s, start=4) + r = self.module.test_inout_bytes4(s) + expected = self._sint(s, end=4) + assert r == expected + + # check that the rest of input string is preserved + assert rest == self._sint(s, start=4) diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_symbolic.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_symbolic.py new file mode 100644 index 0000000000000000000000000000000000000000..14d068fc3a1097a55d07a6c22c285cea374eaa50 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_symbolic.py @@ -0,0 +1,494 @@ +import pytest + +from numpy.f2py.symbolic import ( + Expr, + Op, + ArithOp, + Language, + as_symbol, + as_number, + as_string, + as_array, + as_complex, + as_terms, + as_factors, + eliminate_quotes, + insert_quotes, + fromstring, + as_expr, + as_apply, + as_numer_denom, + as_ternary, + as_ref, + as_deref, + normalize, + as_eq, + as_ne, + as_lt, + as_gt, + as_le, + as_ge, +) +from . 
import util + + +class TestSymbolic(util.F2PyTest): + def test_eliminate_quotes(self): + def worker(s): + r, d = eliminate_quotes(s) + s1 = insert_quotes(r, d) + assert s1 == s + + for kind in ["", "mykind_"]: + worker(kind + '"1234" // "ABCD"') + worker(kind + '"1234" // ' + kind + '"ABCD"') + worker(kind + "\"1234\" // 'ABCD'") + worker(kind + '"1234" // ' + kind + "'ABCD'") + worker(kind + '"1\\"2\'AB\'34"') + worker("a = " + kind + "'1\\'2\"AB\"34'") + + def test_sanity(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.op == Op.SYMBOL + assert repr(x) == "Expr(Op.SYMBOL, 'x')" + assert x == x + assert x != y + assert hash(x) is not None + + n = as_number(123) + m = as_number(456) + assert n.op == Op.INTEGER + assert repr(n) == "Expr(Op.INTEGER, (123, 4))" + assert n == n + assert n != m + assert hash(n) is not None + + fn = as_number(12.3) + fm = as_number(45.6) + assert fn.op == Op.REAL + assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" + assert fn == fn + assert fn != fm + assert hash(fn) is not None + + c = as_complex(1, 2) + c2 = as_complex(3, 4) + assert c.op == Op.COMPLEX + assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," + " Expr(Op.INTEGER, (2, 4))))") + assert c == c + assert c != c2 + assert hash(c) is not None + + s = as_string("'123'") + s2 = as_string('"ABC"') + assert s.op == Op.STRING + assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) + assert s == s + assert s != s2 + + a = as_array((n, m)) + b = as_array((n, )) + assert a.op == Op.ARRAY + assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," + " Expr(Op.INTEGER, (456, 4))))") + assert a == a + assert a != b + + t = as_terms(x) + u = as_terms(y) + assert t.op == Op.TERMS + assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" + assert t == t + assert t != u + assert hash(t) is not None + + v = as_factors(x) + w = as_factors(y) + assert v.op == Op.FACTORS + assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" + assert v == v + assert w != v + assert hash(v) is not None + + t = as_ternary(x, y, z) + u = as_ternary(x, z, y) + assert t.op == Op.TERNARY + assert t == t + assert t != u + assert hash(t) is not None + + e = as_eq(x, y) + f = as_lt(x, y) + assert e.op == Op.RELATIONAL + assert e == e + assert e != f + assert hash(e) is not None + + def test_tostring_fortran(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + m = as_number(456) + a = as_array((n, m)) + c = as_complex(n, m) + + assert str(x) == "x" + assert str(n) == "123" + assert str(a) == "[123, 456]" + assert str(c) == "(123, 456)" + + assert str(Expr(Op.TERMS, {x: 1})) == "x" + assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" + assert str(Expr(Op.TERMS, {x: -1})) == "-x" + assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" + + assert str(Expr(Op.FACTORS, {x: 1})) == "x" + assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" + assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" + assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" + + v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x + y) 
** 3", str(v) + v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x * y) ** 3", str(v) + + assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" + assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" + assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" + assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]" + + assert str(as_ternary(x, y, z)) == "merge(y, z, x)" + assert str(as_eq(x, y)) == "x .eq. y" + assert str(as_ne(x, y)) == "x .ne. y" + assert str(as_lt(x, y)) == "x .lt. y" + assert str(as_le(x, y)) == "x .le. y" + assert str(as_gt(x, y)) == "x .gt. y" + assert str(as_ge(x, y)) == "x .ge. y" + + def test_tostring_c(self): + language = Language.C + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" + assert (Expr(Op.FACTORS, { + x + y: 2 + }).tostring(language=language) == "(x + y) * (x + y)") + assert Expr(Op.FACTORS, { + x: 12 + }).tostring(language=language) == "pow(x, 12)" + + assert as_apply(ArithOp.DIV, x, + y).tostring(language=language) == "x / y" + assert (as_apply(ArithOp.DIV, x, + x + y).tostring(language=language) == "x / (x + y)") + assert (as_apply(ArithOp.DIV, x - y, x + + y).tostring(language=language) == "(x - y) / (x + y)") + assert (x + (x - y) / (x + y) + + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" + + assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)" + assert as_eq(x, y).tostring(language=language) == "x == y" + assert as_ne(x, y).tostring(language=language) == "x != y" + assert as_lt(x, y).tostring(language=language) == "x < y" + assert as_le(x, y).tostring(language=language) == "x <= y" + assert as_gt(x, y).tostring(language=language) == "x > y" + assert as_ge(x, y).tostring(language=language) == "x >= y" + + def test_operations(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x + x == Expr(Op.TERMS, {x: 2}) + assert x - x == Expr(Op.INTEGER, (0, 4)) + assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) + assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) + assert x * x == Expr(Op.FACTORS, {x: 2}) + assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) + + assert +x == x + assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) + assert 2 * x == Expr(Op.TERMS, {x: 2}) + assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) + assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) + assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) + + assert x**2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y)**2 == Expr( + Op.TERMS, + { + Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, { + x: 1, + y: 1 + }): 2, + }, + ) + assert (x + y) * x == x**2 + x * y + assert (x + y)**2 == x**2 + 2 * x * y + y**2 + assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 + assert (x + y) * z == x * z + y * z + assert z * (x + y) == x * z + y * z + + assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) + assert (2 * x / 2) == x + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) + assert (4 * x / 2) == 2 * x + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (6 * x / 2) == 3 * x + assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( + ArithOp.DIV, 5 * y, 4 * x) + assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, + as_number(2)), (15 * x / 6) / 5 + assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) + + assert (x / 2.0) == 
Expr(Op.TERMS, {x: 0.5}) + + s = as_string('"ABC"') + t = as_string('"123"') + + assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) + assert s // x == Expr(Op.CONCAT, (s, x)) + assert x // s == Expr(Op.CONCAT, (x, s)) + + c = as_complex(1.0, 2.0) + assert -c == as_complex(-1.0, -2.0) + assert c + c == as_expr((1 + 2j) * 2) + assert c * c == as_expr((1 + 2j)**2) + + def test_substitute(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + a = as_array((x, y)) + + assert x.substitute({x: y}) == y + assert (x + y).substitute({x: z}) == y + z + assert (x * y).substitute({x: z}) == y * z + assert (x**4).substitute({x: z}) == z**4 + assert (x / y).substitute({x: z}) == z / y + assert x.substitute({x: y + z}) == y + z + assert a.substitute({x: y + z}) == as_array((y + z, y)) + + assert as_ternary(x, y, + z).substitute({x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) + + def test_fromstring(self): + + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + s = as_string('"ABC"') + t = as_string('"123"') + a = as_array((x, y)) + + assert fromstring("x") == x + assert fromstring("+ x") == x + assert fromstring("- x") == -x + assert fromstring("x + y") == x + y + assert fromstring("x + 1") == x + 1 + assert fromstring("x * y") == x * y + assert fromstring("x * 2") == x * 2 + assert fromstring("x / y") == x / y + assert fromstring("x ** 2", language=Language.Python) == x**2 + assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 + assert fromstring("(x + y) * z") == (x + y) * z + + assert fromstring("f(x)") == f(x) + assert fromstring("f(x,y)") == f(x, y) + assert fromstring("f[x]") == f[x] + assert fromstring("f[x][y]") == f[x][y] + + assert fromstring('"ABC"') == s + assert (normalize( + fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t) + assert fromstring('f("ABC")') == f(s) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") + + assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") + assert fromstring("f((/x, y/))") == f(a) + assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) + + assert fromstring("123") == as_number(123) + assert fromstring("123_2") == as_number(123, 2) + assert fromstring("123_myintkind") == as_number(123, "myintkind") + + assert fromstring("123.0") == as_number(123.0, 4) + assert fromstring("123.0_4") == as_number(123.0, 4) + assert fromstring("123.0_8") == as_number(123.0, 8) + assert fromstring("123.0e0") == as_number(123.0, 4) + assert fromstring("123.0d0") == as_number(123.0, 8) + assert fromstring("123d0") == as_number(123.0, 8) + assert fromstring("123e-0") == as_number(123.0, 4) + assert fromstring("123d+0") == as_number(123.0, 8) + assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") + assert fromstring("3E4") == as_number(30000.0, 4) + + assert fromstring("(1, 2)") == as_complex(1, 2) + assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), + as_symbol("PI")) + + assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) + + assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), + x, + y=as_number(1)) + assert fromstring( + 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( + as_symbol("PERSON"), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))), + ) + + assert fromstring("x?y:z") == as_ternary(x, y, z) + + assert fromstring("*x") == as_deref(x) + assert fromstring("**x") == 
as_deref(as_deref(x)) + assert fromstring("&x") == as_ref(x) + assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) + assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x**y") == as_deref(x) * as_deref(y) + + assert fromstring("x == y") == as_eq(x, y) + assert fromstring("x != y") == as_ne(x, y) + assert fromstring("x < y") == as_lt(x, y) + assert fromstring("x > y") == as_gt(x, y) + assert fromstring("x <= y") == as_le(x, y) + assert fromstring("x >= y") == as_ge(x, y) + + assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) + assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) + assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) + assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) + assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) + assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) + + def test_traverse(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + + # Use traverse to substitute a symbol + def replace_visit(s, r=z): + if s == x: + return r + + assert x.traverse(replace_visit) == z + assert y.traverse(replace_visit) == y + assert z.traverse(replace_visit) == z + assert (f(y)).traverse(replace_visit) == f(y) + assert (f(x)).traverse(replace_visit) == f(z) + assert (f[y]).traverse(replace_visit) == f[y] + assert (f[z]).traverse(replace_visit) == f[z] + assert (x + y + z).traverse(replace_visit) == (2 * z + y) + assert (x + + f(y, x - z)).traverse(replace_visit) == (z + + f(y, as_number(0))) + assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) + + # Use traverse to collect symbols, method 1 + function_symbols = set() + symbols = set() + + def collect_symbols(s): + if s.op is Op.APPLY: + oper = s.data[0] + function_symbols.add(oper) + if oper in symbols: + symbols.remove(oper) + elif s.op is Op.SYMBOL and s not in function_symbols: + symbols.add(s) + + (x + f(y, x - z)).traverse(collect_symbols) + assert function_symbols == {f} + assert symbols == {x, y, z} + + # Use traverse to collect symbols, method 2 + def collect_symbols2(expr, symbols): + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols2, symbols) + assert symbols == {x, y, z, f} + + # Use traverse to partially collect symbols + def collect_symbols3(expr, symbols): + if expr.op is Op.APPLY: + # skip traversing function calls + return expr + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols3, symbols) + assert symbols == {x} + + def test_linear_solve(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.linear_solve(x) == (as_number(1), as_number(0)) + assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) + assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) + assert y.linear_solve(x) == (as_number(0), y) + assert (y * z).linear_solve(x) == (as_number(0), y * z) + + assert (x + y).linear_solve(x) == (as_number(1), y) + assert (z * x + y).linear_solve(x) == (z, y) + assert ((z + y) * x + y).linear_solve(x) == (z + y, y) + assert (z * y * x + y).linear_solve(x) == (z * y, y) + + pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) + + def 
test_as_numer_denom(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert as_numer_denom(x) == (x, as_number(1)) + assert as_numer_denom(x / n) == (x, n) + assert as_numer_denom(n / x) == (n, x) + assert as_numer_denom(x / y) == (x, y) + assert as_numer_denom(x * y) == (x * y, as_number(1)) + assert as_numer_denom(n + x / y) == (x + n * y, y) + assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) + + def test_polynomial_atoms(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert x.polynomial_atoms() == {x} + assert n.polynomial_atoms() == set() + assert (y[x]).polynomial_atoms() == {y[x]} + assert (y(x)).polynomial_atoms() == {y(x)} + assert (y(x) + x).polynomial_atoms() == {y(x), x} + assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} + assert (y(x)**x).polynomial_atoms() == {y(x)} diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/test_value_attrspec.py b/phivenv/Lib/site-packages/numpy/f2py/tests/test_value_attrspec.py new file mode 100644 index 0000000000000000000000000000000000000000..9ca08588b916aee9a1dad7a44febdf7ae0c4d9b1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/test_value_attrspec.py @@ -0,0 +1,15 @@ +import os +import pytest + +from . import util + +class TestValueAttr(util.F2PyTest): + sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] + + # gh-21665 + @pytest.mark.slow + def test_gh21665(self): + inp = 2 + out = self.module.fortfuncs.square(inp) + exp_out = 4 + assert out == exp_out diff --git a/phivenv/Lib/site-packages/numpy/f2py/tests/util.py b/phivenv/Lib/site-packages/numpy/f2py/tests/util.py new file mode 100644 index 0000000000000000000000000000000000000000..b837ca9c256dbe28a0dc167e89ddbbeba3bdef29 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/tests/util.py @@ -0,0 +1,431 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present +- determining paths to tests + +""" +import glob +import os +import sys +import subprocess +import tempfile +import shutil +import atexit +import textwrap +import re +import pytest +import contextlib +import numpy +import concurrent.futures + +from pathlib import Path +from numpy._utils import asunicode +from numpy.testing import temppath, IS_WASM +from importlib import import_module +from numpy.f2py._backends._meson import MesonBackend + +# +# Maintaining a temporary module directory +# + +_module_dir = None +_module_num = 5403 + +if sys.platform == "cygwin": + NUMPY_INSTALL_ROOT = Path(__file__).parent.parent.parent + _module_list = list(NUMPY_INSTALL_ROOT.glob("**/*.dll")) + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except OSError: + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + global _module_num + get_module_dir() + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise RuntimeError("Temporary module name already in use.") + return name + + +def _memoize(func): + memo = {} + + def wrapper(*a, 
**kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + + wrapper.__name__ = func.__name__ + return wrapper + + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. + + """ + + code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" + + d = get_module_dir() + + # Copy files + dst_sources = [] + f2py_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + base, ext = os.path.splitext(dst) + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): + f2py_sources.append(dst) + + assert f2py_sources + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + f2py_opts = ["-c", "-m", module_name] + options + f2py_sources + f2py_opts += ["--backend", "meson"] + if skip: + f2py_opts += ["skip:"] + skip + if only: + f2py_opts += ["only:"] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, "-c", code] + f2py_opts + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running f2py failed: %s\n%s" % + (cmd[4:], asunicode(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Rebase (Cygwin-only) + if sys.platform == "cygwin": + # If someone starts deleting modules after import, this will + # need to change to record how big each module is, rather than + # relying on rebase being able to find that from the files. + _module_list.extend( + glob.glob(os.path.join(d, "{:s}*".format(module_name))) + ) + subprocess.check_call( + ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] + + _module_list + ) + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, + options=[], + skip=[], + only=[], + suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = ".f" + with temppath(suffix=suffix) as path: + with open(path, "w") as f: + f.write(source_code) + return build_module([path], + options=options, + skip=skip, + only=only, + module_name=module_name) + + +# +# Check if compilers are available at all... +# + +def check_language(lang, code_snippet=None): + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + if runmeson.returncode == 0: + return True + else: + return False + finally: + shutil.rmtree(tmpdir) + return False + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! 
Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' + print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +# +# Building with meson +# + + +class SimplifiedMesonBackend(MesonBackend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def compile(self): + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + + +def build_meson(source_files, module_name=None, **kwargs): + """ + Build a module via Meson and import it. + """ + build_dir = get_module_dir() + if module_name is None: + module_name = get_temp_module_name() + + # Initialize the MesonBackend + backend = SimplifiedMesonBackend( + modulename=module_name, + sources=source_files, + extra_objects=kwargs.get("extra_objects", []), + build_dir=build_dir, + include_dirs=kwargs.get("include_dirs", []), + library_dirs=kwargs.get("library_dirs", []), + libraries=kwargs.get("libraries", []), + define_macros=kwargs.get("define_macros", []), + undef_macros=kwargs.get("undef_macros", []), + f2py_flags=kwargs.get("f2py_flags", []), + sysinfo_flags=kwargs.get("sysinfo_flags", []), + fc_flags=kwargs.get("fc_flags", []), + flib_flags=kwargs.get("flib_flags", []), + setup_flags=kwargs.get("setup_flags", []), + remove_build_dir=kwargs.get("remove_build_dir", False), + extra_dat=kwargs.get("extra_dat", {}), + ) + + # Compile the module + # NOTE: Catch-all since without distutils it is hard to determine which + # compiler stack is on the CI + try: + backend.compile() + except subprocess.CalledProcessError: + pytest.skip("Failed to compile module") + + # Import the compiled module + sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") + return import_module(module_name) + + +# +# Unittest convenience +# + + +class F2PyTest: + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = ".f" + module = None + _has_c_compiler = None + _has_f77_compiler = None + _has_f90_compiler = None + + @property + def module_name(self): + cls = type(self) + return f'_{cls.__module__.rsplit(".",1)[-1]}_{cls.__name__}_ext_module' + + @classmethod + def setup_class(cls): + if sys.platform == "win32": + pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)") + F2PyTest._has_c_compiler = has_c_compiler() + F2PyTest._has_f77_compiler = has_f77_compiler() + F2PyTest._has_f90_compiler = has_f90_compiler() + + def setup_method(self): + if self.module is not None: + return + + codes = self.sources if self.sources else [] + if self.code: + codes.append(self.suffix) + + needs_f77 = 
any(str(fn).endswith(".f") for fn in codes) + needs_f90 = any(str(fn).endswith(".f90") for fn in codes) + needs_pyf = any(str(fn).endswith(".pyf") for fn in codes) + + if needs_f77 and not self._has_f77_compiler: + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not self._has_f90_compiler: + pytest.skip("No Fortran 90 compiler available") + if needs_pyf and not (self._has_f90_compiler or self._has_f77_compiler): + pytest.skip("No Fortran compiler available") + + # Build the module + if self.code is not None: + self.module = build_code( + self.code, + options=self.options, + skip=self.skip, + only=self.only, + suffix=self.suffix, + module_name=self.module_name, + ) + + if self.sources is not None: + self.module = build_module( + self.sources, + options=self.options, + skip=self.skip, + only=self.only, + module_name=self.module_name, + ) + + +# +# Helper functions +# + + +def getpath(*a): + # Package root + d = Path(numpy.f2py.__file__).parent.resolve() + return d.joinpath(*a) + + +@contextlib.contextmanager +def switchdir(path): + curpath = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curpath) diff --git a/phivenv/Lib/site-packages/numpy/f2py/use_rules.py b/phivenv/Lib/site-packages/numpy/f2py/use_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..25b89bb12ddd04640783dc0458cff3fb427a42ce --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/f2py/use_rules.py @@ -0,0 +1,106 @@ +""" +Build 'use others module data' mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import ( + applyrules, dictappend, gentitle, hasnote, outmess +) + + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if 'only' in r and r['only']: + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % + (v, r['map'][v])) + else: + outmess( + '\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n' % (v, r['map'][v])) + else: + for v in m['vars'].keys(): + if v in revmap: + varsmap[v] = revmap[v] + else: + varsmap[v] = v + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle('%s=>%s' % (name, realname)), + 'endtitle': gentitle('end of %s=>%s' % (name, realname)), + 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/phivenv/Lib/site-packages/numpy/fft/__init__.py b/phivenv/Lib/site-packages/numpy/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..410108148549f427ef6d8d5a6772a7844514e5e8 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/__init__.py @@ -0,0 +1,215 @@ +""" +Discrete Fourier Transform (:mod:`numpy.fft`) +============================================= + +.. currentmodule:: numpy.fft + +The SciPy module `scipy.fft` is a more comprehensive superset +of ``numpy.fft``, which includes only a basic set of routines. + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). 
The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Type Promotion +-------------- + +`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and +``complex128`` arrays respectively. For an FFT implementation that does not +promote input arrays, see `scipy.fftpack`. + +Normalization +------------- + +The argument ``norm`` indicates which direction of the pair of direct/inverse +transforms is scaled and with what normalization factor. +The default normalization (``"backward"``) has the direct (forward) transforms +unscaled and the inverse (backward) transforms scaled by :math:`1/n`. 
It is +possible to obtain unitary transforms by setting the keyword argument ``norm`` +to ``"ortho"`` so that both direct and inverse transforms are scaled by +:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to +``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse +transforms unscaled (i.e. exactly opposite to the default ``"backward"``). +`None` is an alias of the default option ``"backward"`` for backward +compatibility. + +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assume the same symmetry of +their input, and for an output of ``n`` points use ``n/2+1`` input points. + +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. + +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering. The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions. + +""" + +from . import _pocketfft, _helper +# TODO: ``numpy.fft.helper`` was deprecated in NumPy 2.0. It should +# be deleted once downstream libraries move to `numpy.fft`. +from . 
import helper +from ._pocketfft import * +from ._helper import * + +__all__ = _pocketfft.__all__.copy() +__all__ += _helper.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/phivenv/Lib/site-packages/numpy/fft/__init__.pyi b/phivenv/Lib/site-packages/numpy/fft/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7a3348fcfa7025f07eb3a04250296dbb9b07800a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/__init__.pyi @@ -0,0 +1,28 @@ +from numpy._pytesttester import PytestTester + +from numpy.fft._pocketfft import ( + fft as fft, + ifft as ifft, + rfft as rfft, + irfft as irfft, + hfft as hfft, + ihfft as ihfft, + rfftn as rfftn, + irfftn as irfftn, + rfft2 as rfft2, + irfft2 as irfft2, + fft2 as fft2, + ifft2 as ifft2, + fftn as fftn, + ifftn as ifftn, +) + +from numpy.fft._helper import ( + fftshift as fftshift, + ifftshift as ifftshift, + fftfreq as fftfreq, + rfftfreq as rfftfreq, +) + +__all__: list[str] +test: PytestTester diff --git a/phivenv/Lib/site-packages/numpy/fft/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4e40f629a0131c6797539d7b653d91bbaa4d7e3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/__pycache__/_helper.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/__pycache__/_helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b6595d458a6c591f75feb82abc2359fab083bc0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/__pycache__/_helper.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6e1841d8f74df82ea4e311ca18d68b600a32e7f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/__pycache__/helper.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/__pycache__/helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c3f0ecc37db774d608c86c30340fcd714a0ff3e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/__pycache__/helper.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/_helper.py b/phivenv/Lib/site-packages/numpy/fft/_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..781d322b415dd9fe4c771d1ef858d938d210f31b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/_helper.py @@ -0,0 +1,231 @@ +""" +Discrete Fourier Transforms - _helper.py + +""" +from numpy._core import integer, empty, arange, asarray, roll +from numpy._core.overrides import array_function_dispatch, set_module + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = (int, integer) + + +def _fftshift_dispatcher(x, axes=None): + return (x,) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). 
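+    For a one-dimensional input this amounts to a cyclic shift by half the
+    length (the implementation below rolls each listed axis by ``dim // 2``);
+    a minimal check of that equivalence:
+
+    >>> x = np.arange(6)
+    >>> np.array_equal(np.fft.fftshift(x), np.roll(x, len(x) // 2))
+    True
+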
+ Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. 
, 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int, device=device) + N = (n-1)//2 + 1 + p1 = arange(0, N, dtype=int, device=device) + results[:N] = p1 + p2 = arange(-(n//2), 0, dtype=int, device=device) + results[N:] = p2 + return results * val + + +@set_module('numpy.fft') +def rfftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0/(n*d) + N = n//2 + 1 + results = arange(0, N, dtype=int, device=device) + return results * val diff --git a/phivenv/Lib/site-packages/numpy/fft/_helper.pyi b/phivenv/Lib/site-packages/numpy/fft/_helper.pyi new file mode 100644 index 0000000000000000000000000000000000000000..378b0ae9a1575207759daadf4d43da2e9eda9efe --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/_helper.pyi @@ -0,0 +1,51 @@ +from typing import Any, TypeVar, overload, Literal as L + +from numpy import generic, integer, floating, complexfloating +from numpy._typing import ( + NDArray, + ArrayLike, + _ShapeLike, + _ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) + +_SCT = TypeVar("_SCT", bound=generic) + +__all__: list[str] + +@overload +def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... + +@overload +def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... + +@overload +def fftfreq( + n: int | integer[Any], + d: _ArrayLikeFloat_co = ..., + device: None | L["cpu"] = ..., +) -> NDArray[floating[Any]]: ... +@overload +def fftfreq( + n: int | integer[Any], + d: _ArrayLikeComplex_co = ..., + device: None | L["cpu"] = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def rfftfreq( + n: int | integer[Any], + d: _ArrayLikeFloat_co = ..., + device: None | L["cpu"] = ..., +) -> NDArray[floating[Any]]: ... 
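+# Note: as with ``fftfreq`` above, the overload that applies is selected by
+# the spacing ``d`` -- a real-valued ``d`` yields a floating result array,
+# a complex ``d`` a complexfloating one.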
+@overload +def rfftfreq( + n: int | integer[Any], + d: _ArrayLikeComplex_co = ..., + device: None | L["cpu"] = ..., +) -> NDArray[complexfloating[Any, Any]]: ... diff --git a/phivenv/Lib/site-packages/numpy/fft/_pocketfft.py b/phivenv/Lib/site-packages/numpy/fft/_pocketfft.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6654dca998fac343dadc037aaca7bba73b9b88 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/_pocketfft.py @@ -0,0 +1,1701 @@ +""" +Discrete Fourier Transforms + +Routines in this module: + +fft(a, n=None, axis=-1, norm="backward") +ifft(a, n=None, axis=-1, norm="backward") +rfft(a, n=None, axis=-1, norm="backward") +irfft(a, n=None, axis=-1, norm="backward") +hfft(a, n=None, axis=-1, norm="backward") +ihfft(a, n=None, axis=-1, norm="backward") +fftn(a, s=None, axes=None, norm="backward") +ifftn(a, s=None, axes=None, norm="backward") +rfftn(a, s=None, axes=None, norm="backward") +irfftn(a, s=None, axes=None, norm="backward") +fft2(a, s=None, axes=(-2,-1), norm="backward") +ifft2(a, s=None, axes=(-2, -1), norm="backward") +rfft2(a, s=None, axes=(-2,-1), norm="backward") +irfft2(a, s=None, axes=(-2, -1), norm="backward") + +i = inverse transform +r = transform of purely real data +h = Hermite transform +n = n-dimensional transform +2 = 2-dimensional transform +(Note: 2D routines are just nD routines with different default +behavior.) + +""" +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', + 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] + +import functools +import warnings + +from numpy.lib.array_utils import normalize_axis_index +from numpy._core import (asarray, empty_like, result_type, + conjugate, take, sqrt, reciprocal) +from . import _pocketfft_umath as pfu +from numpy._core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.fft') + + +# `inv_norm` is a float by which the result of the transform needs to be +# divided. This replaces the original, more intuitive 'fct` parameter to avoid +# divisions by zero (or alternatively additional checks) in the case of +# zero-length axes during its computation. +def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): + if n < 1: + raise ValueError(f"Invalid number of FFT data points ({n}) specified.") + + # Calculate the normalization factor, passing in the array dtype to + # avoid precision loss in the possible sqrt or reciprocal. + if not is_forward: + norm = _swap_direction(norm) + + real_dtype = result_type(a.real.dtype, 1.0) + if norm is None or norm == "backward": + fct = 1 + elif norm == "ortho": + fct = reciprocal(sqrt(n, dtype=real_dtype)) + elif norm == "forward": + fct = reciprocal(n, dtype=real_dtype) + else: + raise ValueError(f'Invalid norm value {norm}; should be "backward",' + '"ortho" or "forward".') + + n_out = n + if is_real: + if is_forward: + ufunc = pfu.rfft_n_even if n % 2 == 0 else pfu.rfft_n_odd + n_out = n // 2 + 1 + else: + ufunc = pfu.irfft + else: + ufunc = pfu.fft if is_forward else pfu.ifft + + axis = normalize_axis_index(axis, a.ndim) + + if out is None: + if is_real and not is_forward: # irfft, complex in, real output. + out_dtype = real_dtype + else: # Others, complex output. 
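+            # Promoting the input dtype with a complex scalar yields the
+            # matching complex dtype (e.g. float32 -> complex64,
+            # float64 -> complex128).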
+ out_dtype = result_type(a.dtype, 1j) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis+1:], + dtype=out_dtype) + elif ((shape := getattr(out, "shape", None)) is not None + and (len(shape) != a.ndim or shape[axis] != n_out)): + raise ValueError("output array has wrong shape.") + + return ufunc(a, fct, axes=[(axis,), (), (axis,)], out=out) + + +_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward", + "ortho": "ortho", "forward": "backward"} + + +def _swap_direction(norm): + try: + return _SWAP_DIRECTION_MAP[norm] + except KeyError: + raise ValueError(f'Invalid norm value {norm}; should be "backward", ' + '"ortho" or "forward".') from None + + +def _fft_dispatcher(a, n=None, axis=None, norm=None, out=None): + return (a, out) + + +@array_function_dispatch(_fft_dispatcher) +def fft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional discrete Fourier Transform. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [CT]. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : for definition of the DFT and conventions used. + ifft : The inverse of `fft`. + fft2 : The two-dimensional FFT. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + fftfreq : Frequency bins for given FFT parameters. + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier + Transform (DFT) can be calculated efficiently, by using symmetries in the + calculated terms. The symmetry is highest when `n` is a power of 2, and + the transform is therefore most efficient for these sizes. + + The DFT is defined, with the conventions used in this implementation, in + the documentation for the `numpy.fft` module. + + References + ---------- + .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. 
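+    With matching ``norm`` values, `fft` and `ifft` form an exact inverse
+    pair regardless of the mode chosen; a small sketch (the input values
+    here are arbitrary):
+
+    >>> a = np.ones(4)
+    >>> np.allclose(np.fft.ifft(np.fft.fft(a, norm="ortho"), norm="ortho"), a)
+    True
+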
+ + Examples + -------- + >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part, as described in + the `numpy.fft` documentation: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = np.fft.fft(np.sin(t)) + >>> freq = np.fft.fftfreq(t.shape[-1]) + >>> plt.plot(freq, sp.real, freq, sp.imag) + [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, True, norm, out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ifft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(a)) == a`` to within numerical accuracy. + For a general description of the algorithm and definitions, + see `numpy.fft`. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``a[0]`` should contain the zero frequency term, + * ``a[1:n//2]`` should contain the positive-frequency terms, + * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``A[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `numpy.fft` for details. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : An introduction, with definitions and general explanations. + fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse. + ifft2 : The two-dimensional inverse FFT. + ifftn : The n-dimensional inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. 
Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + Examples + -------- + >>> np.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) + >>> s = np.fft.ifft(n) + >>> plt.plot(t, s.real, label='real') + [<matplotlib.lines.Line2D object at ...>] + >>> plt.plot(t, s.imag, '--', label='imaginary') + [<matplotlib.lines.Line2D object at ...>] + >>> plt.legend() + <matplotlib.legend.Legend object at ...> + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def rfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional discrete Fourier Transform for real input. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + a : array_like + Input array. + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + irfft : The inverse of `rfft`. + fft : The one-dimensional FFT of general (complex) input. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e. the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. 
If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, True, True, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None, out=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. + irfftn : The inverse of the *n*-dimensional FFT of real input. 
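To make the role of `n` concrete, here is a minimal doctest-style sketch, assuming only NumPy, of recovering an odd-length signal (the default, by contrast, assumes an even original length):

    >>> import numpy as np
    >>> x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])      # odd length
    >>> X = np.fft.rfft(x)                           # 5//2 + 1 == 3 points
    >>> np.allclose(np.fft.irfft(X, n=len(x)), x)    # pass the true length
    True
    >>> np.fft.irfft(X).shape                        # default assumes even: 2*(3-1)
    (4,)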
+ + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `a`, where `a` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + correct length of the real input **must** be given. + + Examples + -------- + >>> np.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> np.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + output = _raw_fft(a, n, axis, True, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def hfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` + where ``m`` is the length of the input along the axis specified by + `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*m - 2`` where ``m`` is the length of the transformed axis of + the input. To get an odd number of output points, `n` must be + specified, for instance as ``2*m - 1`` in the typical case, + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See also + -------- + rfft : Compute the one-dimensional FFT for real input. + ihfft : The inverse of `hfft`. 
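The even/odd length conventions spelled out in the Notes below can be checked directly; a minimal sketch, assuming only NumPy:

    >>> import numpy as np
    >>> a = np.array([1.0, 2.0, 3.0, 4.0])
    >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 2)), a)  # even case
    True
    >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 1)), a)  # odd case
    True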
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd.
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    The correct interpretation of the hermitian input depends on the length of
+    the original data, as given by `n`. This is because each input shape could
+    correspond to either an odd or even length signal. By default, `hfft`
+    assumes an even output length which puts the last entry at the Nyquist
+    frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+    the value is thus treated as purely real. To avoid losing information, the
+    shape of the full signal **must** be given.
+
+    Examples
+    --------
+    >>> signal = np.array([1, 2, 3, 4, 3, 2])
+    >>> np.fft.fft(signal)
+    array([15.+0.j, -4.+0.j,  0.+0.j, -1.-0.j,  0.+0.j, -4.+0.j]) # may vary
+    >>> np.fft.hfft(signal[:4]) # Input first half of signal
+    array([15., -4.,  0., -1.,  0., -4.])
+    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
+    array([15., -4.,  0., -1.,  0., -4.])
+
+
+    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
+    >>> np.conj(signal.T) - signal   # check Hermitian symmetry
+    array([[ 0.-0.j, -0.+0.j], # may vary
+           [ 0.+0.j,  0.-0.j]])
+    >>> freq_spectrum = np.fft.hfft(signal)
+    >>> freq_spectrum
+    array([[ 1.,  1.],
+           [ 2., -2.]])
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    new_norm = _swap_direction(norm)
+    # pass `out` through to the real inverse transform
+    output = irfft(conjugate(a), n, axis, norm=new_norm, out=out)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ihfft(a, n=None, axis=-1, norm=None, out=None):
+    """
+    Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        Length of the inverse FFT, the number of points along
+        transformation axis in the input to use.  If `n` is smaller than
+        the length of the input, the input is cropped.  If it is larger,
+        the input is padded with zeros. If `n` is not given, the length of
+        the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is ``n//2 + 1``.
+
+    See also
+    --------
+    hfft, irfft
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain.
So here it's `hfft` for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + Examples + -------- + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> np.fft.ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> np.fft.ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + new_norm = _swap_direction(norm) + out = rfft(a, n, axis, norm=new_norm, out=out) + return conjugate(out, out=out) + + +def _cook_nd_args(a, s=None, axes=None, invreal=0): + if s is None: + shapeless = True + if axes is None: + s = list(a.shape) + else: + s = take(a.shape, axes) + else: + shapeless = False + s = list(s) + if axes is None: + if not shapeless: + msg = ("`axes` should not be `None` if `s` is not `None` " + "(Deprecated in NumPy 2.0). In a future version of NumPy, " + "this will raise an error and `s[i]` will correspond to " + "the size along the transformed axis specified by " + "`axes[i]`. To retain current behaviour, pass a sequence " + "[0, ..., k-1] to `axes` for an array of dimension k.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + axes = list(range(-len(s), 0)) + if len(s) != len(axes): + raise ValueError("Shape and axes have different lengths.") + if invreal and shapeless: + s[-1] = (a.shape[axes[-1]] - 1) * 2 + if None in s: + msg = ("Passing an array containing `None` values to `s` is " + "deprecated in NumPy 2.0 and will raise an error in " + "a future version of NumPy. To use the default behaviour " + "of the corresponding 1-D transform, pass the value matching " + "the default for its `n` parameter. To use the default " + "behaviour for every axis, the `s` argument can be omitted.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + # use the whole input array along axis `i` if `s[i] == -1` + s = [a.shape[_a] if _s == -1 else _s for _s, _a in zip(s, axes)] + return s, axes + + +def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None, out=None): + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + itl = list(range(len(axes))) + itl.reverse() + for ii in itl: + a = function(a, n=s[ii], axis=axes[ii], norm=norm, out=out) + return a + + +def _fftn_dispatcher(a, s=None, axes=None, norm=None, out=None): + return (a, out) + + +@array_function_dispatch(_fftn_dispatcher) +def fftn(a, s=None, axes=None, norm=None, out=None): + """ + Compute the N-dimensional discrete Fourier Transform. + + This function computes the *N*-dimensional discrete Fourier Transform over + any number of axes in an *M*-dimensional array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. 
deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` means that the transform over that axis is
+        performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfftn : The *n*-dimensional FFT of real input.
+    fft2 : The two-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to centre of array
+
+    Notes
+    -----
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of all axes, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    See `numpy.fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> a = np.mgrid[:3, :3, :3][0]
+    >>> np.fft.fftn(a, axes=(1, 2))
+    array([[[ 0.+0.j,   0.+0.j,   0.+0.j], # may vary
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[ 9.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[18.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]]])
+    >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
+    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[-2.+0.j, -2.+0.j, -2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+
+    >>> import matplotlib.pyplot as plt
+    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+    ...                      2 * np.pi * np.arange(200) / 34)
+    >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+    >>> FS = np.fft.fftn(S)
+    >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Compute the N-dimensional inverse discrete Fourier Transform.
+ + This function computes the inverse of the N-dimensional discrete + Fourier Transform over any number of axes in an M-dimensional array by + means of the Fast Fourier Transform (FFT). In other words, + ``ifftn(fftn(a)) == a`` to within numerical accuracy. + For a description of the definitions and conventions used, see `numpy.fft`. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fftn`, i.e. it should have the term for zero frequency + in all axes in the low-order corner, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``ifft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the IFFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence is + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. + ifft : The one-dimensional inverse FFT. + ifft2 : The two-dimensional inverse FFT. + ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning + of array. 
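As a quick check of the inverse relationship described above, a minimal sketch assuming only NumPy (the seed is arbitrary):

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> a = rng.standard_normal((4, 6)) + 1j * rng.standard_normal((4, 6))
    >>> np.allclose(np.fft.ifftn(np.fft.fftn(a)), a)
    True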
+
+    Notes
+    -----
+    See `numpy.fft` for definitions and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension.  Although this is the common
+    approach, it might lead to surprising results.  If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> a = np.eye(4)
+    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
+
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = np.fft.ifftn(n).real
+    >>> plt.imshow(im)
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Compute the 2-dimensional discrete Fourier Transform.
+
+    This function computes the *n*-dimensional discrete Fourier Transform
+    over any axes in an *M*-dimensional array by means of the
+    Fast Fourier Transform (FFT).  By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last two
+        axes are used.  A repeated index in `axes` means the transform over
+        that axis is performed multiple times.  A one-element sequence means
+        that a one-dimensional FFT is performed. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence only the
+        last axis can have ``s`` not equal to the shape at that axis).
+
+        ..
versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifft2 : The inverse two-dimensional FFT. + fft : The one-dimensional FFT. + fftn : The *n*-dimensional FFT. + fftshift : Shifts zero-frequency terms to the center of the array. + For two-dimensional input, swaps first and third quadrants, and second + and fourth quadrants. + + Notes + ----- + `fft2` is just `fftn` with a different default for `axes`. + + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of the transformed axes, the positive frequency terms + in the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + the axes, in order of decreasingly negative frequency. + + See `fftn` for details and a plotting example, and `numpy.fft` for + definitions and conventions used. + + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.fft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary + 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ]]) + + """ + return _raw_fftnd(a, s, axes, fft, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Compute the 2-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the 2-dimensional discrete Fourier + Transform over any number of axes in an M-dimensional array by means of + the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` + to within numerical accuracy. By default, the inverse transform is + computed over the last two axes of the input array. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fft2`, i.e. it should have the term for zero frequency + in the low-order corner of the two axes, the positive frequency terms in + the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + both axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each axis) of the output (``s[0]`` refers to axis 0, + ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. 
deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last two
+        axes are used.  A repeated index in `axes` means the transform over
+        that axis is performed multiple times.  A one-element sequence means
+        that a one-dimensional FFT is performed. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
+    ifftn : The inverse of the *n*-dimensional FFT.
+    fft : The one-dimensional FFT.
+    ifft : The one-dimensional inverse FFT.
+
+    Notes
+    -----
+    `ifft2` is just `ifftn` with a different default for `axes`.
+
+    See `ifftn` for details and a plotting example, and `numpy.fft` for
+    definition and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension.  Although this is the common
+    approach, it might lead to surprising results.  If another form of zero
+    padding is desired, it must be performed before `ifft2` is called.
+
+    Examples
+    --------
+    >>> a = 4 * np.eye(4)
+    >>> np.fft.ifft2(a)
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform for real input.
+
+    This function computes the N-dimensional discrete Fourier Transform over
+    any number of axes in an M-dimensional real array by means of the Fast
+    Fourier Transform (FFT).  By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining
+    transforms are complex.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ The final element of `s` corresponds to `n` for ``rfft(x, n)``, while + for the remaining axes, it corresponds to `n` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence is + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT + of real input. + fft : The one-dimensional FFT, with definitions and conventions used. + rfft : The one-dimensional FFT of real input. + fftn : The n-dimensional FFT. + rfft2 : The two-dimensional FFT of real input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `rfft`, then the transform over the remaining axes is + performed as by `fftn`. The order of the output is as for `rfft` for the + final transformation axis, and as for `fftn` for the remaining + transformation axes. + + See `fft` for details, definitions and conventions used. 
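A minimal sketch, assuming only NumPy, of the output shape this implies: only the last transformed axis is halved, while the others keep their length.

    >>> import numpy as np
    >>> a = np.ones((3, 4, 5))
    >>> np.fft.rfftn(a).shape   # real transform on the last axis: 5//2 + 1
    (3, 4, 3)
    >>> np.fft.fftn(a).shape    # full complex transform, for comparison
    (3, 4, 5)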
+ + Examples + -------- + >>> a = np.ones((2, 2, 2)) + >>> np.fft.rfftn(a) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + >>> np.fft.rfftn(a, axes=(2, 0)) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + a = rfft(a, s[-1], axes[-1], norm, out=out) + for ii in range(len(axes)-1): + a = fft(a, s[ii], axes[ii], norm, out=out) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Compute the 2-dimensional FFT of a real array. + + Parameters + ---------- + a : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. Default: ``(-2, -1)``. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last inverse transform. + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + rfftn : Compute the N-dimensional discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.rfft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]]) + """ + return rfftn(a, s, axes, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def irfftn(a, s=None, axes=None, norm=None, out=None): + """ + Computes the inverse of `rfftn`. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform for real input over any number of axes in an + M-dimensional array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e. as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. + + Parameters + ---------- + a : array_like + Input array. 
+ s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to + be ``2*(m-1)`` where ``m`` is the length of the input along that axis. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last transformation. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + rfftn : The forward n-dimensional FFT of real input, + of which `ifftn` is the inverse. + fft : The one-dimensional FFT, with definitions and conventions used. + irfft : The inverse of the one-dimensional FFT of real input. + irfft2 : The inverse of the two-dimensional FFT of real input. + + Notes + ----- + See `fft` for definitions and conventions used. + + See `rfft` for definitions and conventions used for real input. + + The correct interpretation of the hermitian input depends on the shape of + the original data, as given by `s`. This is because each input shape could + correspond to either an odd or even length signal. 
By default, `irfftn` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. When performing the + final complex to real transform, the last value is thus treated as purely + real. To avoid losing information, the correct shape of the real input + **must** be given. + + Examples + -------- + >>> a = np.zeros((3, 2, 2)) + >>> a[0, 0, 0] = 3 * 2 * 2 + >>> np.fft.irfftn(a) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes, invreal=1) + for ii in range(len(axes)-1): + a = ifft(a, s[ii], axes[ii], norm) + a = irfft(a, s[-1], axes[-1], norm, out=out) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Computes the inverse of `rfft2`. + + Parameters + ---------- + a : array_like + The input array + s : sequence of ints, optional + Shape of the real output to the inverse FFT. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default: ``(-2, -1)``, the last two axes. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last transformation. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + rfft2 : The forward two-dimensional FFT of real input, + of which `irfft2` is the inverse. + rfft : The one-dimensional FFT for real input. + irfft : The inverse of the one-dimensional FFT of real input. + irfftn : Compute the inverse of the N-dimensional FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. 
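Because the defaults come from `irfftn`, the even-length assumption on the last axis applies here as well; a minimal sketch, assuming only NumPy:

    >>> import numpy as np
    >>> a = np.arange(15.0).reshape(3, 5)     # odd last axis
    >>> A = np.fft.rfft2(a)
    >>> np.fft.irfft2(A).shape                # default assumes an even last axis
    (3, 4)
    >>> np.allclose(np.fft.irfft2(A, s=a.shape), a)
    True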
+
+    Examples
+    --------
+    >>> a = np.mgrid[:5, :5][0]
+    >>> A = np.fft.rfft2(a)
+    >>> np.fft.irfft2(A, s=a.shape)
+    array([[0., 0., 0., 0., 0.],
+           [1., 1., 1., 1., 1.],
+           [2., 2., 2., 2., 2.],
+           [3., 3., 3., 3., 3.],
+           [4., 4., 4., 4., 4.]])
+    """
+    return irfftn(a, s, axes, norm, out=out)
diff --git a/phivenv/Lib/site-packages/numpy/fft/_pocketfft.pyi b/phivenv/Lib/site-packages/numpy/fft/_pocketfft.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..760319b14987d8614f74cf0886d22b3a7e945353
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/fft/_pocketfft.pyi
@@ -0,0 +1,122 @@
+from collections.abc import Sequence
+from typing import Literal as L
+
+from numpy import complex128, float64
+from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
+
+_NormKind = L[None, "backward", "ortho", "forward"]
+
+__all__: list[str]
+
+def fft(
+    a: ArrayLike,
+    n: None | int = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def ifft(
+    a: ArrayLike,
+    n: None | int = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def rfft(
+    a: ArrayLike,
+    n: None | int = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def irfft(
+    a: ArrayLike,
+    n: None | int = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[float64] = ...,
+) -> NDArray[float64]: ...
+
+# Input array must be compatible with `np.conjugate`
+def hfft(
+    a: _ArrayLikeNumber_co,
+    n: None | int = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[float64] = ...,
+) -> NDArray[float64]: ...
+
+def ihfft(
+    a: ArrayLike,
+    n: None | int = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def fftn(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def ifftn(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def rfftn(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def irfftn(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[float64] = ...,
+) -> NDArray[float64]: ...
+
+def fft2(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def ifft2(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def rfft2(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[complex128] = ...,
+) -> NDArray[complex128]: ...
+
+def irfft2(
+    a: ArrayLike,
+    s: None | Sequence[int] = ...,
+    axes: None | Sequence[int] = ...,
+    norm: _NormKind = ...,
+    out: None | NDArray[float64] = ...,
+) -> NDArray[float64]: ...
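The return annotations above can be observed at runtime; a minimal sketch, assuming only NumPy: the complex transforms default to ``complex128`` and the real-valued inverses to ``float64``, matching ``NDArray[complex128]`` and ``NDArray[float64]`` in the stubs.

    >>> import numpy as np
    >>> np.fft.fft([0.0, 1.0, 0.0, 0.0]).dtype   # complex transform
    dtype('complex128')
    >>> np.fft.irfft([1, -1j, -1]).dtype         # real-valued inverse
    dtype('float64')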
diff --git a/phivenv/Lib/site-packages/numpy/fft/_pocketfft_umath.cp39-win_amd64.lib b/phivenv/Lib/site-packages/numpy/fft/_pocketfft_umath.cp39-win_amd64.lib new file mode 100644 index 0000000000000000000000000000000000000000..810f930c7ea0cac1551a4f8b4d986bff7c906884 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/_pocketfft_umath.cp39-win_amd64.lib differ diff --git a/phivenv/Lib/site-packages/numpy/fft/helper.py b/phivenv/Lib/site-packages/numpy/fft/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..bdcbd3edf02d959bd93d0f4f432ed4c0e04b6cde --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/helper.py @@ -0,0 +1,16 @@ +def __getattr__(attr_name): + import warnings + from numpy.fft import _helper + ret = getattr(_helper, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.fft.helper' has no attribute {attr_name}") + warnings.warn( + "The numpy.fft.helper has been made private and renamed to " + "numpy.fft._helper. All four functions exported by it (i.e. fftshift, " + "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. " + f"Please use numpy.fft.{attr_name} instead.", + DeprecationWarning, + stacklevel=3 + ) + return ret diff --git a/phivenv/Lib/site-packages/numpy/fft/tests/__init__.py b/phivenv/Lib/site-packages/numpy/fft/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a91fa224ff08ddbd4aa6f7849309c2b644a2708b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f70e74c124a916e4fcbdb08e0cdaca4819f3574 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c949c38a847af7c6443922384c0ae6c6ec4c2324 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/fft/tests/test_helper.py b/phivenv/Lib/site-packages/numpy/fft/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..44c6a3357f20d37682d20fb2959742b7a1a6b812 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/tests/test_helper.py @@ -0,0 +1,167 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +import numpy as np +from numpy.testing import assert_array_almost_equal +from numpy import fft, pi + + +class TestFFTShift: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + 
assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], + [3, 2], + [5, 4] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ + from numpy._core import asarray, concatenate, arange, take + + def original_fftshift(x, axes=None): + """ How fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, 
axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9*fft.fftfreq(9), x) + assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10*fft.fftfreq(10), x) + assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9*fft.rfftfreq(9), x) + assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10*fft.rfftfreq(10), x) + assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + + +class TestIRFFTN: + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) diff --git a/phivenv/Lib/site-packages/numpy/fft/tests/test_pocketfft.py b/phivenv/Lib/site-packages/numpy/fft/tests/test_pocketfft.py new file mode 100644 index 0000000000000000000000000000000000000000..a6182880ff1a2ff81f054a26e0c71c8d50b203e6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/fft/tests/test_pocketfft.py @@ -0,0 +1,581 @@ +import numpy as np +import pytest +from numpy.random import random +from numpy.testing import ( + assert_array_equal, assert_raises, assert_allclose, IS_WASM + ) +import threading +import queue + + +def fft1(x): + L = len(x) + phase = -2j * np.pi * (np.arange(L) / L) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D: + + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j*random(maxlen) + xr = random(maxlen) + for i in range(1, maxlen): + assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + atol=1e-12) + assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i), + xr[0:i], atol=1e-12) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short(self, dtype): + # Test with explicitly given number of points, both for n + # smaller and for n larger than the input size. + maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + xr = random(maxlen).astype(dtype) + xxr = np.concatenate([xr, np.zeros_like(xr)]) + for i in range(1, maxlen*2): + check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i) + assert check_c.real.dtype == dtype + assert_allclose(check_c, xx[0:i], atol=atol, rtol=0) + check_r = np.fft.irfft(np.fft.rfft(xr, n=i), n=i) + assert check_r.dtype == dtype + assert_allclose(check_r, xxr[0:i], atol=atol, rtol=0) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short_reversed(self, dtype): + # Also test explicitly given number of points in reversed order. 
+ maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + for i in range(1, maxlen*2): + check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i) + assert check_via_c.dtype == x.dtype + assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0) + # For irfft, we can neither recover the imaginary part of + # the first element, nor the imaginary part of the last + # element if npts is even. So, set to 0 for the comparison. + y = x.copy() + n = i // 2 + 1 + y.imag[0] = 0 + if i % 2 == 0: + y.imag[n-1:] = 0 + yy = np.concatenate([y, np.zeros_like(y)]) + check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i) + assert check_via_r.dtype == x.dtype + assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0) + + def test_fft(self): + x = random(30) + 1j*random(30) + assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) + assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) + assert_allclose(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho"), atol=1e-6) + assert_allclose(fft1(x) / 30., + np.fft.fft(x, norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("axis", (0, 1)) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fft_out_argument(self, dtype, transpose, axis): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + y = random((10, 20)) + 1j*random((10, 20)) + fft, ifft = np.fft.fft, np.fft.ifft + else: + y = random((10, 20)) + fft, ifft = np.fft.rfft, np.fft.irfft + + expected = fft(y, axis=axis) + out = zeros_like(expected) + result = fft(y, out=out, axis=axis) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axis=axis) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axis=axis) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("axis", [0, 1]) + def test_fft_inplace_out(self, axis): + # Test some weirder in-place combinations + y = random((20, 20)) + 1j*random((20, 20)) + # Fully in-place. + y1 = y.copy() + expected1 = np.fft.fft(y1, axis=axis) + result1 = np.fft.fft(y1, axis=axis, out=y1) + assert result1 is y1 + assert_array_equal(result1, expected1) + # In-place of part of the array; rest should be unchanged. + y2 = y.copy() + out2 = y2[:10] if axis == 0 else y2[:, :10] + expected2 = np.fft.fft(y2, n=10, axis=axis) + result2 = np.fft.fft(y2, n=10, axis=axis, out=out2) + assert result2 is out2 + assert_array_equal(result2, expected2) + if axis == 0: + assert_array_equal(y2[10:], y[10:]) + else: + assert_array_equal(y2[:, 10:], y[:, 10:]) + # In-place of another part of the array. + y3 = y.copy() + y3_sel = y3[5:] if axis == 0 else y3[:, 5:] + out3 = y3[5:15] if axis == 0 else y3[:, 5:15] + expected3 = np.fft.fft(y3_sel, n=10, axis=axis) + result3 = np.fft.fft(y3_sel, n=10, axis=axis, out=out3) + assert result3 is out3 + assert_array_equal(result3, expected3) + if axis == 0: + assert_array_equal(y3[:5], y[:5]) + assert_array_equal(y3[15:], y[15:]) + else: + assert_array_equal(y3[:, :5], y[:, :5]) + assert_array_equal(y3[:, 15:], y[:, 15:]) + # In-place with n > nin; rest should be unchanged. 
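+ # With n=15 but only 10 input points the transform zero-pads the + # input, so `out` must hold all 15 points; the tail of `y4` beyond + # the output slice should keep its original values, which is + # checked below.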
+ y4 = y.copy() + y4_sel = y4[:10] if axis == 0 else y4[:, :10] + out4 = y4[:15] if axis == 0 else y4[:, :15] + expected4 = np.fft.fft(y4_sel, n=15, axis=axis) + result4 = np.fft.fft(y4_sel, n=15, axis=axis, out=out4) + assert result4 is out4 + assert_array_equal(result4, expected4) + if axis == 0: + assert_array_equal(y4[15:], y[15:]) + else: + assert_array_equal(y4[:, 15:], y[:, 15:]) + # Overwrite in a transpose. + y5 = y.copy() + out5 = y5.T + result5 = np.fft.fft(y5, axis=axis, out=out5) + assert result5 is out5 + assert_array_equal(result5, expected1) + # Reverse strides. + y6 = y.copy() + out6 = y6[::-1] if axis == 0 else y6[:, ::-1] + result6 = np.fft.fft(y6, axis=axis, out=out6) + assert result6 is out6 + assert_array_equal(result6, expected1) + + def test_fft_bad_out(self): + x = np.arange(30.) + with pytest.raises(TypeError, match="must be of ArrayType"): + np.fft.fft(x, out="") + with pytest.raises(ValueError, match="has wrong shape"): + np.fft.fft(x, out=np.zeros_like(x).reshape(5, -1)) + with pytest.raises(TypeError, match="Cannot cast"): + np.fft.fft(x, out=np.zeros_like(x, dtype=float)) + + @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) + def test_ifft(self, norm): + x = random(30) + 1j*random(30) + assert_allclose( + x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), + atol=1e-6) + # Ensure we get the correct error message + with pytest.raises(ValueError, + match='Invalid number of FFT data points'): + np.fft.ifft([], norm=norm) + + def test_fft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x), atol=1e-6) + assert_allclose(np.fft.fft2(x), + np.fft.fft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / (30. * 20.), + np.fft.fft2(x, norm="forward"), atol=1e-6) + + def test_ifft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), + np.fft.ifft2(x), atol=1e-6) + assert_allclose(np.fft.ifft2(x), + np.fft.ifft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), + np.fft.ifft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * (30. * 20.), + np.fft.ifft2(x, norm="forward"), atol=1e-6) + + def test_fftn(self): + x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + assert_allclose( + np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), + np.fft.fftn(x), atol=1e-6) + assert_allclose(np.fft.fftn(x), + np.fft.fftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), + np.fft.fftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.), + np.fft.fftn(x, norm="forward"), atol=1e-6) + + def test_ifftn(self): + x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + assert_allclose( + np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), + np.fft.ifftn(x), atol=1e-6) + assert_allclose(np.fft.ifftn(x), + np.fft.ifftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), + np.fft.ifftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * (30. * 20. 
* 10.), + np.fft.ifftn(x, norm="forward"), atol=1e-6) + + def test_rfft(self): + x = random(30) + for n in [x.size, 2*x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + assert_allclose( + np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], + np.fft.rfft(x, n=n, norm=norm), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n), + np.fft.rfft(x, n=n, norm="backward"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / np.sqrt(n), + np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / n, + np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + + def test_rfft_even(self): + x = np.arange(8) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n//2 + 1], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + + def test_irfft(self): + x = random(30) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfft2(self): + x = random((30, 20)) + assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) + assert_allclose(np.fft.rfft2(x), + np.fft.rfft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), + np.fft.rfft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / (30. * 20.), + np.fft.rfft2(x, norm="forward"), atol=1e-6) + + def test_irfft2(self): + x = random((30, 20)) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfftn(self): + x = random((30, 20, 10)) + assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) + assert_allclose(np.fft.rfftn(x), + np.fft.rfftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), + np.fft.rfftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / (30. * 20. 
* 10.), + np.fft.rfftn(x, norm="forward"), atol=1e-6) + + def test_irfftn(self): + x = random((30, 20, 10)) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_hfft(self): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm), + np.fft.hfft(x_herm, norm="backward"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), + np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / 30., + np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + + def test_ihfft(self): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="backward"), norm="backward"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="ortho"), norm="ortho"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="forward"), norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_axes(self, op): + x = random((30, 20, 10)) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + for a in axes: + op_tr = op(np.transpose(x, a)) + tr_op = np.transpose(op(x, axes=a), a) + assert_allclose(op_tr, tr_op, atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_negative_1(self, op): + x = np.arange(100).reshape(10, 10) + # should use the whole input array along the first axis + assert op(x, s=(-1, 5), axes=(0, 1)).shape == (10, 5) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_s_axes_none(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5)) + + @pytest.mark.parametrize("op", [np.fft.fft2, np.fft.ifft2]) + def test_s_axes_none_2D(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5), axes=None) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_contains_none(self, op): + x = random((30, 20, 10)) + with pytest.warns(match='array containing `None` values to `s`'): + op(x, s=(10, None, 10), axes=(0, 1, 2)) + + def test_all_1d_norm_preserving(self): + # verify that round-trip transforms are norm-preserving + x = random(30) + x_norm = np.linalg.norm(x) + n = x.size * 2 + func_pairs = [(np.fft.fft, np.fft.ifft), + (np.fft.rfft, np.fft.irfft), + # hfft: order so the first function takes x.size samples + # (necessary for comparison to x_norm above) + (np.fft.ihfft, np.fft.hfft), + ] + for forw, back in func_pairs: + for n in [x.size, 2*x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + assert_allclose(x_norm, + np.linalg.norm(tmp), 
atol=1e-6) + + @pytest.mark.parametrize("axes", [(0, 1), (0, 2), None]) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fftn_out_argument(self, dtype, transpose, axes): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + x = random((10, 5, 6)) + 1j*random((10, 5, 6)) + fft, ifft = np.fft.fftn, np.fft.ifftn + else: + x = random((10, 5, 6)) + fft, ifft = np.fft.rfftn, np.fft.irfftn + + expected = fft(x, axes=axes) + out = zeros_like(expected) + result = fft(x, out=out, axes=axes) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axes=axes) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axes=axes) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("fft", [np.fft.fftn, np.fft.ifftn, np.fft.rfftn]) + def test_fftn_out_and_s_interaction(self, fft): + # With s, shape varies, so generally one cannot pass in out. + if fft is np.fft.rfftn: + x = random((10, 5, 6)) + else: + x = random((10, 5, 6)) + 1j*random((10, 5, 6)) + with pytest.raises(ValueError, match="has wrong shape"): + fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2)) + # Except on the first axis done (which is the last of axes). + s = (10, 5, 5) + expected = fft(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = fft(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + @pytest.mark.parametrize("s", [(9, 5, 5), (3, 3, 3)]) + def test_irfftn_out_and_s_interaction(self, s): + # Since for irfftn, the output is real and thus cannot be used for + # intermediate steps, it should always work. 
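+ # A fresh real array of the final shape is therefore a valid `out` + # for any `s` here, unlike for the complex transforms tested above.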
+ x = random((9, 5, 6, 2)) + 1j*random((9, 5, 6, 2)) + expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.complex64, np.complex128]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) +def test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + # See discussion in pull/14178 + _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + else: + raise ValueError() + + +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + +@pytest.mark.skipif(IS_WASM, reason="Cannot start thread") +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.irfft, a) + + +def test_irfft_with_n_1_regression(): + # Regression test for gh-25661 + x = np.arange(10) + np.fft.irfft(x, n=1) + np.fft.hfft(x, n=1) + np.fft.irfft(np.array([0], complex), n=10) + + +def test_irfft_with_n_large_regression(): + # Regression test for gh-25679 + x = np.arange(5) * (1 + 1j) + result = np.fft.hfft(x, n=10) + expected = np.array([20., 9.91628173, -11.8819096, 7.1048486, + -6.62459848, 4., -3.37540152, -0.16057669, + 1.8819096, -20.86055364]) + assert_allclose(result, expected) + + +@pytest.mark.parametrize("fft", [ + np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft +]) +@pytest.mark.parametrize("data", [ + 
np.array([False, True, False]), + np.arange(10, dtype=np.uint8), + np.arange(5, dtype=np.int16), +]) +def test_fft_with_integer_or_bool_input(data, fft): + # Regression test for gh-25819 + result = fft(data) + float_data = data.astype(np.result_type(data, 1.)) + expected = fft(float_data) + assert_array_equal(result, expected) diff --git a/phivenv/Lib/site-packages/numpy/lib/__init__.py b/phivenv/Lib/site-packages/numpy/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee87f0f8bd5efc0e35f5d7acba231bb2ba34da00 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/__init__.py @@ -0,0 +1,89 @@ +""" +``numpy.lib`` is mostly a space for implementing functions that don't +belong in core or in another NumPy submodule with a clear purpose +(e.g. ``random``, ``fft``, ``linalg``, ``ma``). + +``numpy.lib``'s private submodules contain basic functions that are used by +other public modules and are useful to have in the main name-space. + +""" + +# Public submodules +# Note: recfunctions and (maybe) format are public too, but not imported +from . import array_utils +from . import introspect +from . import mixins +from . import npyio +from . import scimath +from . import stride_tricks + +# Private submodules +# load module names. See https://github.com/networkx/networkx/issues/5838 +from . import _type_check_impl +from . import _index_tricks_impl +from . import _nanfunctions_impl +from . import _function_base_impl +from . import _stride_tricks_impl +from . import _shape_base_impl +from . import _twodim_base_impl +from . import _ufunclike_impl +from . import _histograms_impl +from . import _utils_impl +from . import _arraysetops_impl +from . import _polynomial_impl +from . import _npyio_impl +from . import _arrayterator_impl +from . import _arraypad_impl +from . import _version + +# numpy.lib namespace members +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion +from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc + +__all__ = [ + "Arrayterator", "add_docstring", "add_newdoc", "array_utils", + "introspect", "mixins", "NumpyVersion", "npyio", "scimath", + "stride_tricks", "tracemalloc_domain" +] + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester + +def __getattr__(attr): + # Warn for deprecated attributes + import math + import warnings + + if attr == "math": + warnings.warn( + "`np.lib.math` is a deprecated alias for the standard library " + "`math` module (Deprecated Numpy 1.25). Replace usages of " + "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) + return math + elif attr == "emath": + raise AttributeError( + "numpy.lib.emath was an alias for emath module that was removed " + "in NumPy 2.0. Replace usages of numpy.lib.emath with " + "numpy.emath." + ) + elif attr in ( + "histograms", "type_check", "nanfunctions", "function_base", + "arraypad", "arraysetops", "ufunclike", "utils", "twodim_base", + "shape_base", "polynomial", "index_tricks", + ): + raise AttributeError( + f"numpy.lib.{attr} is now private. If you are using a public " + "function, it should be available in the main numpy namespace, " + "otherwise check the NumPy 2.0 migration guide." + ) + elif attr == "arrayterator": + raise AttributeError( + "numpy.lib.arrayterator submodule is now private. To access " + "Arrayterator class use numpy.lib.Arrayterator."
+ ) + else: + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) diff --git a/phivenv/Lib/site-packages/numpy/lib/__init__.pyi b/phivenv/Lib/site-packages/numpy/lib/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0aadfca777f15e862723050f92264b55d12a3905 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/__init__.pyi @@ -0,0 +1,41 @@ +import math as math + +from numpy._pytesttester import PytestTester + +from numpy import ( + ndenumerate as ndenumerate, + ndindex as ndindex, +) + +from numpy.version import version + +from numpy.lib import ( + format as format, + mixins as mixins, + scimath as scimath, + stride_tricks as stride_tricks, + npyio as npyio, + array_utils as array_utils, +) + +from numpy.lib._version import ( + NumpyVersion as NumpyVersion, +) + +from numpy.lib._arrayterator_impl import ( + Arrayterator as Arrayterator, +) + +from numpy._core.multiarray import ( + add_docstring as add_docstring, + tracemalloc_domain as tracemalloc_domain, +) + +from numpy._core.function_base import ( + add_newdoc as add_newdoc, +) + +__all__: list[str] +test: PytestTester + +__version__ = version diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83921908d2b0f8c56a55f87d3527720d4f0a510e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4ba758e257e3cf65d04fee3c7e6a4d875112556 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a5f65b36599a1890ccc0a2cd17963a0c80e2a07 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6754b6e99fbc8fc43299ab3cd765635706c1f89 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8de35bd0bf782ad110f688c6a7a860404c12103e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_datasource.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_datasource.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b5b6c43ffcdc83b9f04816f44eea242594a9c18 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_datasource.cpython-39.pyc differ 
diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4709658993c4c6426c5c85dba52543dbff727c96 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dbd48232552ae56e34e6b859038726390055f01 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_iotools.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_iotools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..198d1ccf4ec069eff58cb8df6088310924aa9b7e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_iotools.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97009d48127a1b08f4b00da287c9fd52e8412b05 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a337077396469983b5dad51cf7039e8eb3844cc0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b2795fa78399687d87758db23b0f932f7d987c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5abfc386fca950009bff18249223280397bc230d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..612a8da0102b955c19d01bf255049d0a97bb35d3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3327922ef18a13a1248c2efaf114ac7e9c2832d1 Binary files /dev/null and 
b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e49f424330de01181785f5f92c7da5b440c2e5a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe9434a268b8b5cde1c2253a63da1801953c475d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a0dd869ea982a97c90aed3c5a33513287629a1f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8507ad7009037cff03b80dea23e9ec31e36f2d57 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09fa99ceab944b5a62c58e56f8dafec08d953d61 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/_version.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a0db6e0483c51b0d6d430632cc50c3d7e7c3d3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/_version.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/array_utils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/array_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..191a71390f4a2c0da5679d96483e9ce94f26a0cf Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/array_utils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/format.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..995656d4136e1aaf159d4fb543b2b9b5d9ef7708 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/format.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/introspect.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/introspect.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..134de4b46b54055c7a9deac001451da7a7bd4ff2 Binary files /dev/null and 
b/phivenv/Lib/site-packages/numpy/lib/__pycache__/introspect.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/mixins.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/mixins.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef785fd9e022706c5cf8b16136160a85d99e49e2 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/mixins.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/npyio.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/npyio.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51c025de8b0118828f8e23b0c88a67591e609ff4 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/npyio.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/recfunctions.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/recfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4de5211218f235c244c3b7993f9005ac55fad8b2 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/recfunctions.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/scimath.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/scimath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..184ffdb5cadb884da5bd822666a179c836db43e0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/scimath.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50bde3ca1a5b6ed7b0132a04700db872d1815901 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/__pycache__/user_array.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/__pycache__/user_array.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..150e0c5ba85d1e219c50fcee9c14ba109d0678c9 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/__pycache__/user_array.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/_array_utils_impl.py b/phivenv/Lib/site-packages/numpy/lib/_array_utils_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..5b981b2f4274a820f9906eff99d72970214de7a0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_array_utils_impl.py @@ -0,0 +1,61 @@ +""" +Miscellaneous utils. +""" +from numpy._core import asarray +from numpy._core.numeric import normalize_axis_tuple, normalize_axis_index +from numpy._utils import set_module + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + + +@set_module("numpy.lib.array_utils") +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. 
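+ In that case `high - low` can exceed `a.size * a.itemsize`.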
+ + Examples + -------- + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2); I.dtype + dtype('float64') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape-1)*stride + else: + a_high += (shape-1)*stride + a_high += bytes_a + return a_low, a_high diff --git a/phivenv/Lib/site-packages/numpy/lib/_array_utils_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_array_utils_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1e5dc9f73847026e552db53006d94846d1ba4885 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_array_utils_impl.pyi @@ -0,0 +1,25 @@ +from typing import Any, Iterable, Tuple + +from numpy import generic +from numpy.typing import NDArray + +__all__: list[str] + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... + +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int = ..., + argname: None | str = ..., + allow_duplicate: None | bool = ..., +) -> Tuple[int, int]: ... + +def normalize_axis_index( + axis: int = ..., + ndim: int = ..., + msg_prefix: None | str = ..., +) -> int: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_arraypad_impl.py b/phivenv/Lib/site-packages/numpy/lib/_arraypad_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..ed52974733efb6ffa2b664da5d534321312843ab --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_arraypad_impl.py @@ -0,0 +1,894 @@ +""" +The arraypad module contains a group of functions to pad values onto the edges +of an n-dimensional array. + +""" +import numpy as np +from numpy._core.overrides import array_function_dispatch +from numpy.lib._index_tricks_impl import ndindex + + +__all__ = ['pad'] + + +############################################################################### +# Private utility functions. + + +def _round_if_needed(arr, dtype): + """ + Rounds arr inplace if destination dtype is integer. + + Parameters + ---------- + arr : ndarray + Input array. + dtype : dtype + The dtype of the destination array. + """ + if np.issubdtype(dtype, np.integer): + arr.round(out=arr) + + +def _slice_at_axis(sl, axis): + """ + Construct tuple of slices to slice an array in the given dimension. + + Parameters + ---------- + sl : slice + The slice for the given dimension. + axis : int + The axis to which `sl` is applied. All other dimensions are left + "unsliced". + + Returns + ------- + sl : tuple of slices + A tuple of slices that applies `sl` along `axis` and leaves all + other dimensions unsliced. + + Examples + -------- + >>> _slice_at_axis(slice(None, 3, -1), 1) + (slice(None, None, None), slice(None, 3, -1), Ellipsis) + """ + return (slice(None),) * axis + (sl,) + (...,) + + +def _view_roi(array, original_area_slice, axis): + """ + Get a view of the current region of interest during iterative padding.
+ + When padding multiple dimensions iteratively, corner values are + unnecessarily overwritten multiple times. This function reduces the + working area for the first dimensions so that corners are excluded. + + Parameters + ---------- + array : ndarray + The array with the region of interest. + original_area_slice : tuple of slices + Denotes the area with original values of the unpadded array. + axis : int + The currently padded dimension assuming that `axis` is padded before + `axis` + 1. + + Returns + ------- + roi : ndarray + The region of interest of the original `array`. + """ + axis += 1 + sl = (slice(None),) * axis + original_area_slice[axis:] + return array[sl] + + +def _pad_simple(array, pad_width, fill_value=None): + """ + Pad array on all sides with either a single value or undefined values. + + Parameters + ---------- + array : ndarray + Array to grow. + pad_width : sequence of tuple[int, int] + Pad width on both sides for each dimension in `arr`. + fill_value : scalar, optional + If provided, the padded area is filled with this value; otherwise + the pad area is left undefined. + + Returns + ------- + padded : ndarray + The padded array with the same dtype as `array`. Its order will default + to C-style if `array` is not F-contiguous. + original_area_slice : tuple + A tuple of slices pointing to the area of the original array. + """ + # Allocate grown array + new_shape = tuple( + left + size + right + for size, (left, right) in zip(array.shape, pad_width) + ) + order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + padded = np.empty(new_shape, dtype=array.dtype, order=order) + + if fill_value is not None: + padded.fill(fill_value) + + # Copy old array into correct space + original_area_slice = tuple( + slice(left, left + size) + for size, (left, right) in zip(array.shape, pad_width) + ) + padded[original_area_slice] = array + + return padded, original_area_slice + + +def _set_pad_area(padded, axis, width_pair, value_pair): + """ + Set empty-padded area in given dimension. + + Parameters + ---------- + padded : ndarray + Array with the pad area which is modified inplace. + axis : int + Dimension with the pad area to set. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + value_pair : tuple of scalars or ndarrays + Values inserted into the pad area on each side. It must match or be + broadcastable to the shape of `arr`. + """ + left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) + padded[left_slice] = value_pair[0] + + right_slice = _slice_at_axis( + slice(padded.shape[axis] - width_pair[1], None), axis) + padded[right_slice] = value_pair[1] + + +def _get_edges(padded, axis, width_pair): + """ + Retrieve edge values from empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the edges are considered. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + + Returns + ------- + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Its + shape will always match `padded` except for the dimension given by + `axis` which will have a length of 1.
+ """ + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] + + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] + + return left_edge, right_edge + + +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): + """ + Construct linear ramps for empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. + + Returns + ------- + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`. + """ + edge_pair = _get_edges(padded, axis, width_pair) + + left_ramp, right_ramp = ( + np.linspace( + start=end_value, + stop=edge.squeeze(axis), # Dimension is replaced by linspace + num=width, + endpoint=False, + dtype=padded.dtype, + axis=axis + ) + for end_value, edge, width in zip( + end_value_pair, edge_pair, width_pair + ) + ) + + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] + + return left_ramp, right_ramp + + +def _get_stats(padded, axis, width_pair, length_pair, stat_func): + """ + Calculate statistic for the empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. + + Returns + ------- + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. 
+ """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + if (left_length == 0 or right_length == 0) \ + and stat_func in {np.amax, np.amin}: + # amax and amin can't operate on an empty array, + # raise a more descriptive warning here instead of the default one + raise ValueError("stat_length of 0 yields no value for padding") + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): + """ + Pad `axis` of `arr` with reflection. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + method : str + Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad + + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. 
+ old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair, original_period): + """ + Pad `axis` of `arr` with wrapped values. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + original_period : int + Original length of data on `axis` of `arr`. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + # Avoid wrapping with only a subset of the original area by ensuring period + # can only be a multiple of the original area's length. + period = period // original_period * original_period + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from left side of the non-pad area. + # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area. 
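+ # e.g. with left_pad = 5 and period = 3 this copies padded[5:8] (the + # first period of original values) into padded[2:5], leaving + # new_left_pad = 2 for the next iteration.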
+ slice_end = left_pad + period + slice_start = slice_end - min(period, left_pad) + right_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from right side of the non-pad area. + # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area. + slice_start = -right_pad - period + slice_end = slice_start + min(period, right_pad) + left_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. + + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side, + # except the case where each dimension has a single value, + # which should be broadcast to a pair, + # e.g.
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] + x = x.ravel() # Ensure x[0], x[1] works + if as_index and (x[0] < 0 or x[1] < 0): + raise ValueError("index can't contain negative values") + return ((x[0], x[1]),) * ndim + + if as_index and x.min() < 0: + raise ValueError("index can't contain negative values") + + # Converting the array with `tolist` seems to improve performance + # when iterating and indexing the result (see usage in `pad`) + return np.broadcast_to(x, (ndim, 2)).tolist() + + +def _pad_dispatcher(array, pad_width, mode=None, **kwargs): + return (array,) + + +############################################################################### +# Public functions + + +@array_function_dispatch(_pad_dispatcher, module='numpy') +def pad(array, pad_width, mode='constant', **kwargs): + """ + Pad an array. + + Parameters + ---------- + array : array_like of rank N + The array to pad. + pad_width : {sequence, array_like, int} + Number of values padded to the edges of each axis. + ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths + for each axis. + ``(before, after)`` or ``((before, after),)`` yields same before + and after pad for each axis. + ``(pad,)`` or ``int`` is a shortcut for before = after = pad width + for all axes. + mode : str or function, optional + One of the following string values or a user supplied function. + + 'constant' (default) + Pads with a constant value. + 'edge' + Pads with the edge values of array. + 'linear_ramp' + Pads with the linear ramp between end_value and the + array edge value. + 'maximum' + Pads with the maximum value of all or part of the + vector along each axis. + 'mean' + Pads with the mean value of all or part of the + vector along each axis. + 'median' + Pads with the median value of all or part of the + vector along each axis. + 'minimum' + Pads with the minimum value of all or part of the + vector along each axis. + 'reflect' + Pads with the reflection of the vector mirrored on + the first and last values of the vector along each + axis. + 'symmetric' + Pads with the reflection of the vector mirrored + along the edge of the array. + 'wrap' + Pads with the wrap of the vector along the axis. + The first values are used to pad the end and the + end values are used to pad the beginning. + 'empty' + Pads with undefined values. + + .. versionadded:: 1.17 + + <function> + Padding function, see Notes. + stat_length : sequence or int, optional + Used in 'maximum', 'mean', 'median', and 'minimum'. Number of + values at edge of each axis used to calculate the statistic value. + + ``((before_1, after_1), ... (before_N, after_N))`` unique statistic + lengths for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after statistic lengths for each axis. + + ``(stat_length,)`` or ``int`` is a shortcut for + ``before = after = statistic`` length for all axes. + + Default is ``None``, to use the entire axis. + constant_values : sequence or scalar, optional + Used in 'constant'. The values to set the padded values for each + axis. + + ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after constants for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + end_values : sequence or scalar, optional + Used in 'linear_ramp'. The values used for the ending value of the + linear_ramp and that will form the edge of the padded array.
+ + ``((before_1, after_1), ... (before_N, after_N))`` unique end values + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after end values for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + .. versionadded:: 1.7.0 + + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. + + Examples + -------- + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, ..., 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, ..., 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... 
vector[-pad_width[1]:] = pad_value + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + """ + array = np.asarray(array) + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) + + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + + for axis in range(padded.ndim): + # Iterate using ndindex as in apply_along_axis, but assuming that + # function operates inplace on the padded array. + + # view with the iteration axis at the end + view = np.moveaxis(padded, axis, -1) + + # compute indices for the iteration axes, and append a trailing + # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) + inds = ndindex(view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + for ind in inds: + function(view[ind], pad_width[axis], axis, kwargs) + + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], + 'constant': ['constant_values'], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError("mode '{}' is not supported".format(mode)) from None + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode '{}': {}" + .format(mode, unsupported_kwargs)) + + stat_functions = {"maximum": np.amax, "minimum": np.amin, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + "can't extend empty axis {} using modes other than " + "'constant' or 'empty'".format(axis) + ) + # passed, don't need to do anything more as _pad_simple already + # 
returned the correct result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length", None) + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = True if mode == "symmetric" else False + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) + continue + + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, array.shape[axis], include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + original_period = padded.shape[axis] - right_index - left_index + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index), original_period) + + return padded diff --git a/phivenv/Lib/site-packages/numpy/lib/_arraypad_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_arraypad_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9cc0cbf5bda3908e6c45eab430646b3aff0e9c32 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_arraypad_impl.pyi @@ -0,0 +1,85 @@ +from typing import ( + Literal as L, + Any, + overload, + TypeVar, + Protocol, +) + +from numpy import generic + +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeInt, + _ArrayLike, +) + +_SCT = TypeVar("_SCT", bound=generic) + +class _ModeFunc(Protocol): + def __call__( + self, + vector: NDArray[Any], + iaxis_pad_width: tuple[int, int], + iaxis: int, + kwargs: dict[str, Any], + /, + ) -> None: ... + +_ModeKind = L[ + "constant", + "edge", + "linear_ramp", + "maximum", + "mean", + "median", + "minimum", + "reflect", + "symmetric", + "wrap", + "empty", +] + +__all__: list[str] + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. 
Consider adding more overloads to express this in the future. + +# Expand `**kwargs` into explicit keyword-only arguments +@overload +def pad( + array: _ArrayLike[_SCT], + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: None | _ArrayLikeInt = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[_SCT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: None | _ArrayLikeInt = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[Any]: ... +@overload +def pad( + array: _ArrayLike[_SCT], + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[_SCT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[Any]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_arraysetops_impl.py b/phivenv/Lib/site-packages/numpy/lib/_arraysetops_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..eed362be1bb4db657b495557d7c517645017a089 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_arraysetops_impl.py @@ -0,0 +1,1186 @@ +""" +Set operations for arrays based on sorting. + +Notes +----- + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +`numpy.sort`, that can provide directly the permutation vectors, thus avoiding +calls to `numpy.argsort`. + +Original author: Robert Cimrman + +""" +import functools +import warnings +from typing import NamedTuple + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", + "unique_values" +] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, ..., -7, 88, 99]) + + The returned array is always 1D. 
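# Quick check of the fast path noted above, using public NumPy calls only:
import numpy as np
x = np.array([1, 2, 4, 7, 0])
np.array_equal(np.ediff1d(x), x.ravel()[1:] - x.ravel()[:-1])  # True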
+ + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + conv = _array_converter(ary) + # Convert to (any) array and ravel: + ary = conv[0].ravel() + + # enforce that the dtype of `ary` is used for the output + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_begin` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_begin = to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_end` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_end = to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty_like(ary, shape=l_diff + l_begin + l_end) + + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + + return conv.wrap(result) + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None, *, equal_nan=None): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None, *, equal_nan=True): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + .. versionadded:: 1.13.0 + + equal_nan : bool, optional + If True, collapses multiple NaN values in the return array into one. + + .. versionadded:: 1.24 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. 
+ unique_inverse : ndarray, optional + The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + .. versionadded:: 1.9.0 + + See Also + -------- + repeat : Repeat elements of an array. + + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + (move the axis to the first dimension to keep the order of the other axes) + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. + + .. versionchanged: 1.21 + If nan values are in the input array, a single nan is put + to the end of the sorted unique values. + + Also for complex arrays all NaN values are considered equivalent + (no matter whether the NaN is in the real or imaginary part). + As the representant for the returned array the smallest one in the + lexicographical order is chosen - see np.sort for how the lexicographical + order is defined for complex arrays. + + .. versionchanged: 2.0 + For multi-dimensional inputs, ``unique_inverse`` is reshaped + such that the input can be reconstructed using + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. 
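# Sketch of the reconstruction contract from the notes above; since the
# inverse's shape varied across NumPy 2.0.x, reshape defensively:
import numpy as np
a = np.array([[1, 0], [1, 0], [2, 3]])
u, inv = np.unique(a, axis=0, return_inverse=True)
np.take(u, np.reshape(inv, -1), axis=0)  # rebuilds `a` row by row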
+ + Examples + -------- + >>> np.unique([1, 1, 2, 2, 3, 3]) + array([1, 2, 3]) + >>> a = np.array([[1, 1], [2, 3]]) + >>> np.unique(a) + array([1, 2, 3]) + + Return the unique rows of a 2D array + + >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) + >>> np.unique(a, axis=0) + array([[1, 0, 0], [2, 3, 4]]) + + Return the indices of the original array that give the unique values: + + >>> a = np.array(['a', 'b', 'b', 'c', 'a']) + >>> u, indices = np.unique(a, return_index=True) + >>> u + array(['a', 'b', 'c'], dtype='>> indices + array([0, 1, 3]) + >>> a[indices] + array(['a', 'b', 'c'], dtype='>> a = np.array([1, 2, 6, 4, 2, 3, 2]) + >>> u, indices = np.unique(a, return_inverse=True) + >>> u + array([1, 2, 3, 4, 6]) + >>> indices + array([0, 1, 4, 3, 1, 2, 1]) + >>> u[indices] + array([1, 2, 6, 4, 2, 3, 2]) + + Reconstruct the input values from the unique values and counts: + + >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) + >>> values, counts = np.unique(a, return_counts=True) + >>> values + array([1, 2, 3, 4, 6]) + >>> counts + array([1, 3, 1, 1, 1]) + >>> np.repeat(values, counts) + array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved + + """ + ar = np.asanyarray(ar) + if axis is None: + ret = _unique1d(ar, return_index, return_inverse, return_counts, + equal_nan=equal_nan, inverse_shape=ar.shape, axis=None) + return _unpack_tuple(ret) + + # axis was specified and not None + try: + ar = np.moveaxis(ar, axis, 0) + except np.exceptions.AxisError: + # this removes the "axis1" or "axis2" prefix from the error message + raise np.exceptions.AxisError(axis, ar.ndim) from None + inverse_shape = [1] * ar.ndim + inverse_shape[axis] = ar.shape[0] + + # Must reshape to a contiguous 2D array for this to work... + orig_shape, orig_dtype = ar.shape, ar.dtype + ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp)) + ar = np.ascontiguousarray(ar) + dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] + + # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured + # data type with `m` fields where each field has the data type of `ar`. + # In the following, we create the array `consolidated`, which has + # shape `(n,)` with data type `dtype`. + try: + if ar.shape[1] > 0: + consolidated = ar.view(dtype) + else: + # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is + # a data type with itemsize 0, and the call `ar.view(dtype)` will + # fail. Instead, we'll use `np.empty` to explicitly create the + # array with shape `(len(ar),)`. Because `dtype` in this case has + # itemsize 0, the total size of the result is still 0 bytes. + consolidated = np.empty(len(ar), dtype=dtype) + except TypeError as e: + # There's no good way to do this for object arrays, etc... + msg = 'The axis argument to unique is not supported for dtype {dt}' + raise TypeError(msg.format(dt=ar.dtype)) from e + + def reshape_uniq(uniq): + n = len(uniq) + uniq = uniq.view(orig_dtype) + uniq = uniq.reshape(n, *orig_shape[1:]) + uniq = np.moveaxis(uniq, 0, axis) + return uniq + + output = _unique1d(consolidated, return_index, + return_inverse, return_counts, + equal_nan=equal_nan, inverse_shape=inverse_shape, + axis=axis) + output = (reshape_uniq(output[0]),) + output[1:] + return _unpack_tuple(output) + + +def _unique1d(ar, return_index=False, return_inverse=False, + return_counts=False, *, equal_nan=True, inverse_shape=None, + axis=None): + """ + Find the unique elements of an array, ignoring shape. 
+ """ + ar = np.asanyarray(ar).flatten() + + optional_indices = return_index or return_inverse + + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool) + mask[:1] = True + if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and + np.isnan(aux[-1])): + if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent + aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') + else: + aux_firstnan = np.searchsorted(aux, aux[-1], side='left') + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + mask[aux_firstnan] = True + mask[aux_firstnan + 1:] = False + else: + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +# Array API set functions + +class UniqueAllResult(NamedTuple): + values: np.ndarray + indices: np.ndarray + inverse_indices: np.ndarray + counts: np.ndarray + + +class UniqueCountsResult(NamedTuple): + values: np.ndarray + counts: np.ndarray + + +class UniqueInverseResult(NamedTuple): + values: np.ndarray + inverse_indices: np.ndarray + + +def _unique_all_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_all_dispatcher) +def unique_all(x): + """ + Find the unique elements of an array, and counts, inverse and indices. + + This function is an Array API compatible alternative to: + + >>> x = np.array([1, 1, 2]) + >>> np.unique(x, return_index=True, return_inverse=True, + ... return_counts=True, equal_nan=False) + (array([1, 2]), array([0, 2]), array([0, 0, 1]), array([2, 1])) + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * indices - The first occurring indices for each unique element. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. + + """ + result = unique( + x, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False + ) + return UniqueAllResult(*result) + + +def _unique_counts_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_counts_dispatcher) +def unique_counts(x): + """ + Find the unique elements and counts of an input array `x`. + + This function is an Array API compatible alternative to: + + >>> x = np.array([1, 1, 2]) + >>> np.unique(x, return_counts=True, equal_nan=False) + (array([1, 2]), array([2, 1])) + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. 
+ + """ + result = unique( + x, + return_index=False, + return_inverse=False, + return_counts=True, + equal_nan=False + ) + return UniqueCountsResult(*result) + + +def _unique_inverse_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_inverse_dispatcher) +def unique_inverse(x): + """ + Find the unique elements of `x` and indices to reconstruct `x`. + + This function is Array API compatible alternative to: + + >>> x = np.array([1, 1, 2]) + >>> np.unique(x, return_inverse=True, equal_nan=False) + (array([1, 2]), array([0, 0, 1])) + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + + See Also + -------- + unique : Find the unique elements of an array. + + """ + result = unique( + x, + return_index=False, + return_inverse=True, + return_counts=False, + equal_nan=False + ) + return UniqueInverseResult(*result) + + +def _unique_values_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_values_dispatcher) +def unique_values(x): + """ + Returns the unique elements of an input array `x`. + + This function is Array API compatible alternative to: + + >>> x = np.array([1, 1, 2]) + >>> np.unique(x, equal_nan=False) + array([1, 2]) + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : ndarray + The unique elements of an input array. + + See Also + -------- + unique : Find the unique elements of an array. + + """ + return unique( + x, + return_index=False, + return_inverse=False, + return_counts=False, + equal_nan=False + ) + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. If True but ``ar1`` or ``ar2`` are not + unique, incorrect results and out-of-bounds indices could result. + Default is False. + return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + .. versionadded:: 1.15.0 + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. 
+ + Examples + -------- + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, + kind=None): + return (ar1, ar2) + + +@array_function_dispatch(_in1d_dispatcher) +def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + """ + Test whether each element of a 1-D array is also present in a second array. + + .. deprecated:: 2.0 + Use :func:`isin` instead of `in1d` for new code. + + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. 
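# The boundary-flag mask used by setxor1d above, traced on a small input:
import numpy as np
aux = np.sort(np.concatenate(([1, 2, 3], [2, 3, 5])))        # [1 2 2 3 3 5]
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
aux[flag[1:] & flag[:-1]]   # values with no equal neighbour -> [1, 5]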
``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `ar1` and `ar2`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `ar1` plus the max-min value of `ar2`. `assume_unique` + has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `ar1` and `ar2`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + .. versionadded:: 1.8.0 + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + isin : Version of this function that preserves the + shape of ar1. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + .. versionadded:: 1.4.0 + + Examples + -------- + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True]) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False]) + >>> test[mask] + array([1, 5]) + """ + + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`in1d` is deprecated. Use `np.isin` instead.", + DeprecationWarning, + stacklevel=2 + ) + + return _in1d(ar1, ar2, assume_unique, invert, kind=kind) + + +def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Ensure that iteration through object arrays yields size-1 arrays + if ar2.dtype == object: + ar2 = ar2.reshape(-1, 1) + + if kind not in {None, 'sort', 'table'}: + raise ValueError( + f"Invalid kind: '{kind}'. 
Please use None, 'sort' or 'table'.") + + # Can use the table method if all arrays are integers or boolean: + is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2)) + use_table_method = is_int_arrays and kind in {None, 'table'} + + if use_table_method: + if ar2.size == 0: + if invert: + return np.ones_like(ar1, dtype=bool) + else: + return np.zeros_like(ar1, dtype=bool) + + # Convert booleans to uint8 so we can use the fast integer algorithm + if ar1.dtype == bool: + ar1 = ar1.astype(np.uint8) + if ar2.dtype == bool: + ar2 = ar2.astype(np.uint8) + + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) + + ar2_range = ar2_max - ar2_min + + # Constraints on whether we can actually use the table method: + # 1. Assert memory usage is not too large + below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) + # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype + range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max + + # Optimal performance is for approximately + # log10(size) > (log10(range) - 2.27) / 0.927. + # However, here we set the requirement that by default + # the intermediate array can only be 6x + # the combined memory allocation of the original + # arrays. See discussion on + # https://github.com/numpy/numpy/pull/12065. + + if ( + range_safe_from_overflow and + (below_memory_constraint or kind == 'table') + ): + + if invert: + outgoing_array = np.ones_like(ar1, dtype=bool) + else: + outgoing_array = np.zeros_like(ar1, dtype=bool) + + # Make elements 1 where the integer exists in ar2 + if invert: + isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 0 + else: + isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 1 + + # Mask out elements we know won't work + basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). + # In that case, use ar2.dtype which must work for all unmasked + # values. + try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] + + return outgoing_array + elif kind == 'table': # not range_safe_from_overflow + raise RuntimeError( + "You have specified kind='table', " + "but the range of values in `ar2` or `ar1` exceed the " + "maximum integer of the datatype. " + "Please set `kind` to None or 'sort'." + ) + elif kind == 'table': + raise ValueError( + "The 'table' method is only " + "supported for boolean or integer arrays. " + "Please select 'sort' or None for kind." + ) + + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. 
`ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None, + *, kind=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False, *, + kind=None): + """ + Calculates ``element in test_elements``, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `element` and `test_elements`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `element` plus the max-min value of `test_elements`. + `assume_unique` has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `element` and `test_elements`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + Notes + ----- + + `isin` is an element-wise function version of the python keyword `in`. 
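# Rough sketch of the 'table' strategy from _in1d above for integer
# inputs: index a boolean lookup table spanning [min(ar2), max(ar2)].
import numpy as np
ar1, ar2 = np.array([0, 1, 2, 5, 0]), np.array([0, 2])
lo, hi = int(ar2.min()), int(ar2.max())
table = np.zeros(hi - lo + 1, dtype=bool)
table[ar2 - lo] = True
result = np.zeros(ar1.shape, dtype=bool)
in_range = (ar1 >= lo) & (ar1 <= hi)
result[in_range] = table[ar1[in_range] - lo]
result   # [ True False  True False  True], same as np.isin(ar1, ar2)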
+ ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(test_elements)) > + (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + .. versionadded:: 1.13.0 + + Examples + -------- + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[False, False], + [False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[False, True], + [ True, False]]) + """ + element = np.asarray(element) + return _in1d(element, test_elements, assume_unique=assume_unique, + invert=invert, kind=kind).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. + + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + Examples + -------- + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. + + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. 
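# setdiff1d below is essentially unique() plus an inverted membership
# test; the same result via public calls:
import numpy as np
a, b = np.array([1, 2, 3, 2, 4, 1]), np.array([3, 4, 5, 6])
u = np.unique(a)
u[~np.isin(u, b)]   # -> [1, 2], equal to np.setdiff1d(a, b)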
The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)] diff --git a/phivenv/Lib/site-packages/numpy/lib/_arraysetops_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_arraysetops_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b7422e7e927dcf42a3ca15aeb68598f26f5ae35a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_arraysetops_impl.pyi @@ -0,0 +1,399 @@ +from typing import ( + Any, + Generic, + Literal as L, + NamedTuple, + overload, + SupportsIndex, + TypeVar, +) + +import numpy as np +from numpy import ( + generic, + number, + ushort, + ubyte, + uintc, + uint, + ulonglong, + short, + int8, + byte, + intc, + int_, + intp, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + timedelta64, + datetime64, + object_, + str_, + bytes_, + void, +) + +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeDT64_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, + _ArrayLikeNumber_co, +) + +_SCT = TypeVar("_SCT", bound=generic) +_NumberType = TypeVar("_NumberType", bound=number[Any]) + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# +# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) +# which could result in, for example, `int64` and `float64`producing a +# `number[_64Bit]` array +_SCTNoCast = TypeVar( + "_SCTNoCast", + np.bool, + ushort, + ubyte, + uintc, + uint, + ulonglong, + short, + byte, + intc, + int_, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + timedelta64, + datetime64, + object_, + str_, + bytes_, + void, +) + +class UniqueAllResult(NamedTuple, Generic[_SCT]): + values: NDArray[_SCT] + indices: NDArray[intp] + inverse_indices: NDArray[intp] + counts: NDArray[intp] + +class UniqueCountsResult(NamedTuple, Generic[_SCT]): + values: NDArray[_SCT] + counts: NDArray[intp] + +class UniqueInverseResult(NamedTuple, Generic[_SCT]): + values: NDArray[_SCT] + inverse_indices: NDArray[intp] + +__all__: list[str] + +@overload +def ediff1d( + ary: _ArrayLikeBool_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[int8]: ... +@overload +def ediff1d( + ary: _ArrayLike[_NumberType], + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[_NumberType]: ... +@overload +def ediff1d( + ary: _ArrayLikeNumber_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[Any]: ... +@overload +def ediff1d( + ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[timedelta64]: ... +@overload +def ediff1d( + ary: _ArrayLikeObject_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[object_]: ... + +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> NDArray[_SCT]: ... 
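# How the overloads here map flag combinations to result arity
# (hypothetical calls; this mirrors the runtime function, not the stubs):
#   np.unique(a)                                        -> values
#   np.unique(a, return_index=True)                     -> (values, indices)
#   np.unique(a, return_inverse=True)                   -> (values, inverse)
#   np.unique(a, return_index=True, return_counts=True) -> (values, indices, counts)
# Each flag set to True appends one NDArray[intp] to the returned tuple.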
+@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> NDArray[Any]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... 
+@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ... + +@overload +def unique_all( + x: _ArrayLike[_SCT], / +) -> UniqueAllResult[_SCT]: ... +@overload +def unique_all( + x: ArrayLike, / +) -> UniqueAllResult[Any]: ... + +@overload +def unique_counts( + x: _ArrayLike[_SCT], / +) -> UniqueCountsResult[_SCT]: ... +@overload +def unique_counts( + x: ArrayLike, / +) -> UniqueCountsResult[Any]: ... + +@overload +def unique_inverse(x: _ArrayLike[_SCT], /) -> UniqueInverseResult[_SCT]: ... +@overload +def unique_inverse(x: ArrayLike, /) -> UniqueInverseResult[Any]: ... + +@overload +def unique_values(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def unique_values(x: ArrayLike, /) -> NDArray[Any]: ... + +@overload +def intersect1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., + return_indices: L[False] = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + return_indices: L[False] = ..., +) -> NDArray[Any]: ... +@overload +def intersect1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., + return_indices: L[True] = ..., +) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +@overload +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + return_indices: L[True] = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... + +@overload +def setxor1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def setxor1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., +) -> NDArray[Any]: ... + +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = ..., + invert: bool = ..., + *, + kind: None | str = ..., +) -> NDArray[np.bool]: ... + +@overload +def union1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], +) -> NDArray[_SCTNoCast]: ... +@overload +def union1d( + ar1: ArrayLike, + ar2: ArrayLike, +) -> NDArray[Any]: ... + +@overload +def setdiff1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def setdiff1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., +) -> NDArray[Any]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_arrayterator_impl.py b/phivenv/Lib/site-packages/numpy/lib/_arrayterator_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..4f976dc415e40299b98d7f8710e7fe15b53a298f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_arrayterator_impl.py @@ -0,0 +1,221 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. 
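# Minimal usage sketch, assuming the class behaves as documented below:
import numpy as np
a = np.arange(3 * 4).reshape(3, 4)
for block in np.lib.Arrayterator(a, 4):
    pass   # each `block` is a contiguous sub-array of at most 4 elements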
+ +""" +from operator import mul +from functools import reduce + +__all__ = ['Arrayterator'] + + +class Arrayterator: + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. + + Attributes + ---------- + var + buf_size + start + stop + step + shape + flat + + See Also + -------- + numpy.ndenumerate : Multidimensional array iterator. + numpy.flatiter : Flat array iterator. + numpy.memmap : Create a memory-map to an array stored + in a binary file on disk. + + Notes + ----- + The algorithm works by first finding a "running dimension", along which + the blocks will be extracted. Given an array of dimensions + ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the + first dimension will be used. If, on the other hand, + ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. + Blocks are extracted along this dimension, and when the last block is + returned the process continues from the next dimension, until all + elements have been read. + + Examples + -------- + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + >>> a_itor.shape + (3, 4, 5, 6) + + Now we can iterate over ``a_itor``, and it will return arrays of size + two. Since `buf_size` was smaller than any dimension, the first + dimension will be iterated over first: + + >>> for subarr in a_itor: + ... if not subarr.all(): + ... print(subarr, subarr.shape) # doctest: +SKIP + >>> # [[[[0 1]]]] (1, 1, 1, 2) + + """ + + def __init__(self, var, buf_size=None): + self.var = var + self.buf_size = buf_size + + self.start = [0 for dim in var.shape] + self.stop = [dim for dim in var.shape] + self.step = [1 for dim in var.shape] + + def __getattr__(self, attr): + return getattr(self.var, attr) + + def __getitem__(self, index): + """ + Return a new arrayterator. + + """ + # Fix index, handling ellipsis and incomplete slices. + if not isinstance(index, tuple): + index = (index,) + fixed = [] + length, dims = len(index), self.ndim + for slice_ in index: + if slice_ is Ellipsis: + fixed.extend([slice(None)] * (dims-length+1)) + length = len(fixed) + elif isinstance(slice_, int): + fixed.append(slice(slice_, slice_+1, 1)) + else: + fixed.append(slice_) + index = tuple(fixed) + if len(index) < dims: + index += (slice(None),) * (dims-len(index)) + + # Return a new arrayterator object. + out = self.__class__(self.var, self.buf_size) + for i, (start, stop, step, slice_) in enumerate( + zip(self.start, self.stop, self.step, index)): + out.start[i] = start + (slice_.start or 0) + out.step[i] = step * (slice_.step or 1) + out.stop[i] = start + (slice_.stop or stop-start) + out.stop[i] = min(stop, out.stop[i]) + return out + + def __array__(self, dtype=None, copy=None): + """ + Return corresponding data. 
+ + """ + slice_ = tuple(slice(*t) for t in zip( + self.start, self.stop, self.step)) + return self.var[slice_] + + @property + def flat(self): + """ + A 1-D flat iterator for Arrayterator objects. + + This iterator returns elements of the array to be iterated over in + `~lib.Arrayterator` one by one. + It is similar to `flatiter`. + + See Also + -------- + lib.Arrayterator + flatiter + + Examples + -------- + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + + >>> for subarr in a_itor.flat: + ... if not subarr: + ... print(subarr, type(subarr)) + ... + 0 + + """ + for block in self: + yield from block.flat + + @property + def shape(self): + """ + The shape of the array to be iterated over. + + For an example, see `Arrayterator`. + + """ + return tuple(((stop-start-1)//step+1) for start, stop, step in + zip(self.start, self.stop, self.step)) + + def __iter__(self): + # Skip arrays with degenerate dimensions + if [dim for dim in self.shape if dim <= 0]: + return + + start = self.start[:] + stop = self.stop[:] + step = self.step[:] + ndims = self.var.ndim + + while True: + count = self.buf_size or reduce(mul, self.shape) + + # iterate over each dimension, looking for the + # running dimension (ie, the dimension along which + # the blocks will be built from) + rundim = 0 + for i in range(ndims-1, -1, -1): + # if count is zero we ran out of elements to read + # along higher dimensions, so we read only a single position + if count == 0: + stop[i] = start[i]+1 + elif count <= self.shape[i]: + # limit along this dimension + stop[i] = start[i] + count*step[i] + rundim = i + else: + # read everything along this dimension + stop[i] = self.stop[i] + stop[i] = min(self.stop[i], stop[i]) + count = count//self.shape[i] + + # yield a block + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + yield self.var[slice_] + + # Update start position, taking care of overflow to + # other dimensions + start[rundim] = stop[rundim] # start where we stopped + for i in range(ndims-1, 0, -1): + if start[i] >= self.stop[i]: + start[i] = self.start[i] + start[i-1] += self.step[i-1] + if start[0] >= self.stop[0]: + return diff --git a/phivenv/Lib/site-packages/numpy/lib/_arrayterator_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_arrayterator_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4933aead949533dc305129bea00234e3f484f673 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_arrayterator_impl.pyi @@ -0,0 +1,48 @@ +from collections.abc import Generator +from typing import ( + Any, + TypeVar, + overload, +) + +from numpy import ndarray, dtype, generic +from numpy._typing import DTypeLike, NDArray + +# TODO: Set a shape bound once we've got proper shape support +_Shape = TypeVar("_Shape", bound=Any) +_DType = TypeVar("_DType", bound=dtype[Any]) +_ScalarType = TypeVar("_ScalarType", bound=generic) + +_Index = ( + ellipsis + | int + | slice + | tuple[ellipsis | int | slice, ...] +) + +__all__: list[str] + +# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, +# but its ``__getattr__` method does wrap around the former and thus has +# access to all its methods + +class Arrayterator(ndarray[_Shape, _DType]): + var: ndarray[_Shape, _DType] # type: ignore[assignment] + buf_size: None | int + start: list[int] + stop: list[int] + step: list[int] + + @property # type: ignore[misc] + def shape(self) -> tuple[int, ...]: ... + @property + def flat(self: NDArray[_ScalarType]) -> Generator[_ScalarType, None, None]: ... 
+ def __init__( + self, var: ndarray[_Shape, _DType], buf_size: None | int = ... + ) -> None: ... + @overload + def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, dtype: DTypeLike, copy: None | bool = ...) -> NDArray[Any]: ... + def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ... + def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_datasource.py b/phivenv/Lib/site-packages/numpy/lib/_datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d96337b3afc2ee1d69a7f9040a4619b9c95b75 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_datasource.py @@ -0,0 +1,703 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> from numpy import DataSource + >>> ds = DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP + >>> + >>> # Use the file as you normally would + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP + +""" +import os + +from .._utils import set_module + + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners: + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. 
+ + Examples + -------- + >>> import gzip + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + self._file_openers[".bz2"] = bz2.open + except ImportError: + pass + + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy.lib.npyio') +class DataSource: + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = np.lib.npyio.DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. 
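+    For instance (a sketch; the temporary location is platform dependent)::
+
+        >>> ds = np.lib.npyio.DataSource(None)  # cache under mkdtemp()
+        >>> del ds  # the temporary directory is removed again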
+ + Examples + -------- + :: + + >>> ds = np.lib.npyio.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') + >>> ds.abspath(urlname) + '/home/guido/www.google.com/index.html' + + >>> ds = np.lib.npyio.DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/.../home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if hasattr(self, '_istmpdest') and self._istmpdest: + import shutil + + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + for c in mode: + if c in _writemodes: + return True + return False + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + Returns + ------- + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename+zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. + + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing them is slow and + # a significant fraction of numpy's total import time. + import shutil + from urllib.request import urlopen + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! + if self._isurl(path): + with urlopen(path) as openedurl: + with _open(upath, 'wb') as f: + shutil.copyfileobj(openedurl, f) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. 
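+        For example, a request for ``data.txt`` can be satisfied by a
+        cached ``data.txt.gz`` if only the compressed copy exists; the
+        candidate extensions come from ``_file_openers.keys()``.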
+ + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).lstrip('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + + # First test for local path + if os.path.exists(path): + return True + + # We import this here because importing urllib is slow and + # a significant fraction of numpy's total import time. 
+ from urllib.request import urlopen + from urllib.error import URLError + + # Test cached url + upath = self.abspath(path) + if os.path.exists(upath): + return True + + # Test remote url + if self._isurl(path): + try: + netfile = urlopen(path) + netfile.close() + del(netfile) + return True + except URLError: + return False + return False + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object. + + If `path` is an URL, it will be downloaded, stored in the + `DataSource` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + + # TODO: There is no support for opening a file for writing which + # doesn't exist yet (creating a file). Should there be? + + # TODO: Add a ``subdir`` parameter for specifying the subdirectory + # used to store URLs in self._destpath. + + if self._isurl(path) and self._iswritemode(mode): + raise ValueError("URLs are not writeable") + + # NOTE: _findfile will fail on a new file opened for writing. + found = self._findfile(path) + if found: + _fname, ext = self._splitzipext(found) + if ext == 'bz2': + mode.replace("+", "") + return _file_openers[ext](found, mode=mode, + encoding=encoding, newline=newline) + else: + raise FileNotFoundError(f"{path} not found.") + + +class Repository (DataSource): + """ + Repository(baseurl, destpath='.') + + A data repository where multiple DataSource's share a base + URL/directory. + + `Repository` extends `DataSource` by prepending a base URL (or + directory) to all the files it handles. Use `Repository` when you will + be working with multiple files from one base URL. Initialize + `Repository` with the base URL, then refer to each file by its filename + only. + + Parameters + ---------- + baseurl : str + Path to the local directory or remote location that contains the + data files. + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Examples + -------- + To analyze all files in the repository, do something like this + (note: this is not self-contained code):: + + >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') + >>> for filename in filelist: + ... fp = repos.open(filename) + ... fp.analyze() + ... fp.close() + + Similarly you could use a URL for a repository:: + + >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') + + """ + + def __init__(self, baseurl, destpath=os.curdir): + """Create a Repository with a shared url or directory of baseurl.""" + DataSource.__init__(self, destpath=destpath) + self._baseurl = baseurl + + def __del__(self): + DataSource.__del__(self) + + def _fullpath(self, path): + """Return complete path for path. 
Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str or pathlib.Path + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. 
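+        A minimal sketch (path and contents are illustrative):
+
+        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+        >>> repos.listdir()  # doctest: +SKIP
+        ['data1.txt', 'data2.txt']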
+ + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/phivenv/Lib/site-packages/numpy/lib/_function_base_impl.py b/phivenv/Lib/site-packages/numpy/lib/_function_base_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..f3bc7e1c0b8f92279a6ea1c5063cd0c4fc26753d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_function_base_impl.py @@ -0,0 +1,5936 @@ +import builtins +import collections.abc +import functools +import re +import sys +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import transpose, overrides +from numpy._core.numeric import ( + ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, + ndarray, take, dot, where, intp, integer, isscalar, absolute + ) +from numpy._core.umath import ( + pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, + mod, exp, not_equal, subtract, minimum + ) +from numpy._core.fromnumeric import ( + ravel, nonzero, partition, mean, any, sum + ) +from numpy._core.numerictypes import typecodes +from numpy.lib._twodim_base_impl import diag +from numpy._core.multiarray import ( + _place, bincount, normalize_axis_index, _monotonicity, + interp as compiled_interp, interp_complex as compiled_interp_complex + ) +from numpy._core._multiarray_umath import _array_converter +from numpy._utils import set_module + +# needed in this module for compatibility +from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401 + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'flip', + 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'bincount', 'digitize', 'cov', 'corrcoef', + 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0', + 'meshgrid', 'delete', 'insert', 'append', 'interp', + 'quantile' + ] + +# _QuantileMethods is a dictionary listing all the supported methods to +# compute quantile/percentile. +# +# Below virtual_index refers to the index of the element where the percentile +# would be found in the sorted sample. +# When the sample contains exactly the percentile wanted, the virtual_index is +# an integer to the index of this element. +# When the percentile wanted is in between two elements, the virtual_index +# is made of a integer part (a.k.a 'i' or 'left') and a fractional part +# (a.k.a 'g' or 'gamma') +# +# Each method in _QuantileMethods has two properties +# get_virtual_index : Callable +# The function used to compute the virtual_index. +# fix_gamma : Callable +# A function used for discret methods to force the index to a specific value. 
+_QuantileMethods = dict( + # --- HYNDMAN and FAN METHODS + # Discrete methods + inverted_cdf=dict( + get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles), + fix_gamma=None, # should never be called + ), + averaged_inverted_cdf=dict( + get_virtual_index=lambda n, quantiles: (n * quantiles) - 1, + fix_gamma=lambda gamma, _: _get_gamma_mask( + shape=gamma.shape, + default_value=1., + conditioned_value=0.5, + where=gamma == 0), + ), + closest_observation=dict( + get_virtual_index=lambda n, quantiles: _closest_observation(n, + quantiles), + fix_gamma=None, # should never be called + ), + # Continuous methods + interpolated_inverted_cdf=dict( + get_virtual_index=lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0, 1), + fix_gamma=lambda gamma, _: gamma, + ), + hazen=dict( + get_virtual_index=lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0.5, 0.5), + fix_gamma=lambda gamma, _: gamma, + ), + weibull=dict( + get_virtual_index=lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0, 0), + fix_gamma=lambda gamma, _: gamma, + ), + # Default method. + # To avoid some rounding issues, `(n-1) * quantiles` is preferred to + # `_compute_virtual_index(n, quantiles, 1, 1)`. + # They are mathematically equivalent. + linear=dict( + get_virtual_index=lambda n, quantiles: (n - 1) * quantiles, + fix_gamma=lambda gamma, _: gamma, + ), + median_unbiased=dict( + get_virtual_index=lambda n, quantiles: + _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0), + fix_gamma=lambda gamma, _: gamma, + ), + normal_unbiased=dict( + get_virtual_index=lambda n, quantiles: + _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0), + fix_gamma=lambda gamma, _: gamma, + ), + # --- OTHER METHODS + lower=dict( + get_virtual_index=lambda n, quantiles: np.floor( + (n - 1) * quantiles).astype(np.intp), + fix_gamma=None, # should never be called, index dtype is int + ), + higher=dict( + get_virtual_index=lambda n, quantiles: np.ceil( + (n - 1) * quantiles).astype(np.intp), + fix_gamma=None, # should never be called, index dtype is int + ), + midpoint=dict( + get_virtual_index=lambda n, quantiles: 0.5 * ( + np.floor((n - 1) * quantiles) + + np.ceil((n - 1) * quantiles)), + fix_gamma=lambda gamma, index: _get_gamma_mask( + shape=gamma.shape, + default_value=0.5, + conditioned_value=0., + where=index % 1 == 0), + ), + nearest=dict( + get_virtual_index=lambda n, quantiles: np.around( + (n - 1) * quantiles).astype(np.intp), + fix_gamma=None, + # should never be called, index dtype is int + )) + + +def _rot90_dispatcher(m, k=None, axes=None): + return (m,) + + +@array_function_dispatch(_rot90_dispatcher) +def rot90(m, k=1, axes=(0, 1)): + """ + Rotate an array by 90 degrees in the plane specified by axes. + + Rotation direction is from the first towards the second axis. + This means for a 2D array with the default `k` and `axes`, the + rotation will be counterclockwise. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + axes : (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + .. versionadded:: 1.12.0 + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. 
+ + Notes + ----- + ``rot90(m, k=1, axes=(1,0))`` is the reverse of + ``rot90(m, k=1, axes=(0,1))`` + + ``rot90(m, k=1, axes=(1,0))`` is equivalent to + ``rot90(m, k=-1, axes=(0,1))`` + + Examples + -------- + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError("Axes={} out of range for array of ndim={}." + .format(axes, m.ndim)) + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m, axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + .. versionchanged:: 1.15.0 + None and tuples of axes are supported + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. 
+ + Examples + -------- + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> np.flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> A = np.random.randn(3,4,5) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. + + + Examples + -------- + >>> np.iterable([1, 2, 3]) + True + >>> np.iterable(2) + False + + Notes + ----- + In most cases, the results of ``np.iterable(obj)`` are consistent with + ``isinstance(obj, collections.abc.Iterable)``. One notable exception is + the treatment of 0-dimensional arrays:: + + >>> from collections.abc import Iterable + >>> a = np.array(1.0) # 0-dimensional numpy array + >>> isinstance(a, Iterable) + True + >>> np.iterable(a) + False + + """ + try: + iter(y) + except TypeError: + return False + return True + + +def _weights_are_valid(weights, a, axis): + """Validate weights array. + + We assume, weights is not None. + """ + wgt = np.asanyarray(weights) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + return wgt + + +def _average_dispatcher(a, axis=None, weights=None, returned=None, *, + keepdims=None): + return (a, weights) + + +@array_function_dispatch(_average_dispatcher) +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + `axis=None`, will average over all of the elements of the input array. + If axis is negative it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. 
+ If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + retval, [sum_of_weights] : array_type or double + Return the average along the specified axis. When `returned` is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. `sum_of_weights` is of the + same type as `retval`. The result dtype follows a general pattern. + If `weights` is None, the result dtype will be that of `a` , or ``float64`` + if `a` is integral. Otherwise, if `weights` is not None and `a` is non- + integral, the result type will be the type of lowest precision capable of + representing values of both `a` and `weights`. If `a` happens to be + integral, the previous rules still applies but the result dtype will + at least be ``float64``. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + See Also + -------- + mean + + ma.average : average for masked arrays -- useful if your data contains + "missing" values + numpy.result_type : Returns the type that results from applying the + numpy type promotion rules to the arguments. + + Examples + -------- + >>> data = np.arange(1, 5) + >>> data + array([1, 2, 3, 4]) + >>> np.average(data) + 2.5 + >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) + 4.0 + + >>> data = np.arange(6).reshape((3, 2)) + >>> data + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.average(data, axis=1, weights=[1./4, 3./4]) + array([0.75, 2.75, 4.75]) + >>> np.average(data, weights=[1./4, 3./4]) + Traceback (most recent call last): + ... + TypeError: Axis must be specified when shapes of a and weights differ. + + With ``keepdims=True``, the following result has shape (3, 1). + + >>> np.average(data, axis=1, keepdims=True) + array([[0.5], + [2.5], + [4.5]]) + + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + array([3.4, 4.4]) + >>> np.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis. + """ + a = np.asanyarray(a) + + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. 
+ keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + avg_as_array = np.asanyarray(avg) + scl = avg_as_array.dtype.type(a.size/avg_as_array.size) + else: + wgt = _weights_are_valid(weights=weights, a=a, axis=axis) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + if np.any(scl == 0.0): + raise ZeroDivisionError( + "Weights sum to zero, can't be normalized") + + avg = avg_as_array = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg_as_array.shape: + scl = np.broadcast_to(scl, avg_as_array.shape).copy() + return avg, scl + else: + return avg + + +@set_module('numpy') +def asarray_chkfinite(a, dtype=None, order=None): + """Convert the input to an array, checking for NaNs or Infs. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. Success requires no NaNs or Infs. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + Raises + ------ + ValueError + Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). + + See Also + -------- + asarray : Create and array. + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array. If all elements are finite + ``asarray_chkfinite`` is identical to ``asarray``. + + >>> a = [1, 2] + >>> np.asarray_chkfinite(a, dtype=float) + array([1., 2.]) + + Raises ValueError if array_like contains Nans or Infs. + + >>> a = [1, 2, np.inf] + >>> try: + ... np.asarray_chkfinite(a) + ... except ValueError: + ... print('ValueError') + ... + ValueError + + """ + a = asarray(a, dtype=dtype, order=order) + if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): + raise ValueError( + "array must not contain infs or NaNs") + return a + + +def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): + yield x + # support the undocumented behavior of allowing scalars + if np.iterable(condlist): + yield from condlist + + +@array_function_dispatch(_piecewise_dispatcher) +def piecewise(x, condlist, funclist, *args, **kw): + """ + Evaluate a piecewise-defined function. + + Given a set of conditions and corresponding functions, evaluate each + function on the input data wherever its condition is true. + + Parameters + ---------- + x : ndarray or scalar + The input domain. 
+ condlist : list of bool arrays or bool scalars + Each boolean array corresponds to a function in `funclist`. Wherever + `condlist[i]` is True, `funclist[i](x)` is used as the output value. + + Each boolean array in `condlist` selects a piece of `x`, + and should therefore be of the same shape as `x`. + + The length of `condlist` must correspond to that of `funclist`. + If one extra function is given, i.e. if + ``len(funclist) == len(condlist) + 1``, then that extra function + is the default value, used wherever all conditions are false. + funclist : list of callables, f(x,*args,**kw), or scalars + Each function is evaluated over `x` wherever its corresponding + condition is True. It should take a 1d array as input and give an 1d + array or a scalar value as output. If, instead of a callable, + a scalar is provided then a constant function (``lambda x: scalar``) is + assumed. + args : tuple, optional + Any further arguments given to `piecewise` are passed to the functions + upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then + each function is called as ``f(x, 1, 'a')``. + kw : dict, optional + Keyword arguments used in calling `piecewise` are passed to the + functions upon execution, i.e., if called + ``piecewise(..., ..., alpha=1)``, then each function is called as + ``f(x, alpha=1)``. + + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have a default value of 0. + + + See Also + -------- + choose, select, where + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. + + The result is:: + + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... + |funclist[n2](x[condlist[n2]]) + |-- + + Examples + -------- + Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. + + >>> x = np.linspace(-2.5, 2.5, 6) + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) + array([-1., -1., -1., 1., 1., 1.]) + + Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for + ``x >= 0``. + + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + + Apply the same function to a scalar value. + + >>> y = -2 + >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) + array(2) + + """ + x = asanyarray(x) + n2 = len(funclist) + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + + condlist = asarray(condlist, dtype=bool) + n = len(condlist) + + if n == n2 - 1: # compute the "otherwise" condition. 
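+        # e.g. np.piecewise(x, [x < 0], [-1, 1]): one condition, two
+        # functions, so the trailing 1 is applied wherever x < 0 is False.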
+ condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + "with {} condition(s), either {} or {} functions are expected" + .format(n, n, n+1) + ) + + y = zeros_like(x) + for cond, func in zip(condlist, funclist): + if not isinstance(func, collections.abc.Callable): + y[cond] = func + else: + vals = x[cond] + if vals.size > 0: + y[cond] = func(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + yield from condlist + yield from choicelist + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + Beginning with an array of integers from 0 to 5 (inclusive), + elements less than ``3`` are negated, elements greater than ``3`` + are squared, and elements not meeting either of these conditions + (exactly ``3``) are replaced with a `default` value of ``42``. + + >>> x = np.arange(6) + >>> condlist = [x<3, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 42) + array([ 0, 1, 2, 42, 16, 25]) + + When multiple conditions are satisfied, the first one encountered in + `condlist` is used. + + >>> condlist = [x<=4, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 55) + array([ 0, 1, 2, 3, 4, 25]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + # TODO: This preserves the Python int, float, complex manually to get the + # right `result_type` with NEP 50. Most likely we will grow a better + # way to spell this (and this can be replaced). + choicelist = [ + choice if type(choice) in (int, float, complex) else np.asarray(choice) + for choice in choicelist] + choicelist.append(default if type(default) in (int, float, complex) + else np.asarray(default)) + + try: + dtype = np.result_type(*choicelist) + except TypeError as e: + msg = f'Choicelist and default value do not have a common dtype: {e}' + raise TypeError(msg) from None + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it separately optimizes + # for example when all choices are scalars. 
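+    # (E.g. in np.select([x < 3], [0], default=1) every choice is 0-d, so
+    # result_shape is taken from condlist[0] without broadcasting choices.)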
+ condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + for i, cond in enumerate(condlist): + if cond.dtype.type is not np.bool: + raise TypeError( + 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def _copy_dispatcher(a, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_copy_dispatcher) +def copy(a, order='K', subok=False): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:`ndarray.copy` are very + similar, but have different default values for their order= + arguments.) + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise the + returned array will be forced to be a base-class array (defaults to False). + + .. versionadded:: 1.19.0 + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + See Also + -------- + ndarray.copy : Preferred method for creating an array copy + + Notes + ----- + This is equivalent to: + + >>> np.array(a, copy=True) #doctest: +SKIP + + The copy made of the data is shallow, i.e., for arrays with object dtype, + the new array will point to the same objects. + See Examples from `ndarray.copy`. + + Examples + -------- + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + Note that, np.copy clears previously set WRITEABLE=False flag. + + >>> a = np.array([1, 2, 3]) + >>> a.flags["WRITEABLE"] = False + >>> b = np.copy(a) + >>> b.flags["WRITEABLE"] + True + >>> b[0] = 3 + >>> b + array([3, 2, 3]) + """ + return array(a, order=order, subok=subok, copy=True) + +# Basic operations + + +def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): + yield f + yield from varargs + + +@array_function_dispatch(_gradient_dispatcher) +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Return the gradient of an N-dimensional array. + + The gradient is computed using second order accurate central differences + in the interior points and either first or second order accurate one-sides + (forward or backwards) differences at the boundaries. + The returned gradient hence has the same shape as the input array. + + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + varargs : list of scalar or array, optional + Spacing between f values. Default unitary spacing for all dimensions. 
+ Spacing can be specified using: + + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + i.e. `dx`, `dy`, `dz`, ... + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of + the corresponding dimension + 4. Any combination of N scalars/arrays with the meaning of 2. and 3. + + If `axis` is given, the number of varargs must equal the number of axes. + Default: 1. + + edge_order : {1, 2}, optional + Gradient is calculated using N-th order accurate differences + at the boundaries. Default: 1. + + .. versionadded:: 1.9.1 + + axis : None or int or tuple of ints, optional + Gradient is calculated only along the given axis or axes + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from + the last to the first axis. + + .. versionadded:: 1.11.0 + + Returns + ------- + gradient : ndarray or list of ndarray + A list of ndarrays (or a single ndarray if there is only one dimension) + corresponding to the derivatives of f with respect to each dimension. + Each derivative has the same shape as f. + + Examples + -------- + >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) + >>> np.gradient(f) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(f, 2) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + Spacing can be also specified with an array that represents the coordinates + of the values F along the dimensions. + For instance a uniform spacing: + + >>> x = np.arange(f.size) + >>> np.gradient(f, x) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + + Or a non uniform one: + + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) + >>> np.gradient(f, x) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) + + For two dimensional arrays, the return will be two arrays ordered by + axis. In this example the first array stands for the gradient in + rows and the second one in columns direction: + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) + [array([[ 2., 2., -1.], + [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])] + + In this example the spacing is also specified: + uniform for axis=0 and non uniform for axis=1 + + >>> dx = 2. + >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) + [array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])] + + It is possible to specify how boundaries are treated using `edge_order` + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) + array([0., 2., 4., 6., 8.]) + + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) + array([[ 2., 2., -1.], + [ 2., 2., -1.]]) + + Notes + ----- + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous + derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we + minimize the "consistency error" :math:`\\eta_{i}` between the true gradient + and its estimate from a linear combination of the neighboring grid-points: + + .. 
math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + + \\beta f\\left(x_{i} + h_{d}\\right) + + \\gamma f\\left(x_{i}-h_{s}\\right) + \\right] + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving + the following the linear system: + + .. math:: + + \\left\\{ + \\begin{array}{r} + \\alpha+\\beta+\\gamma=0 \\\\ + \\beta h_{d}-\\gamma h_{s}=1 \\\\ + \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 + \\end{array} + \\right. + + The resulting approximation of :math:`f_{i}^{(1)}` is the following: + + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} + { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + + h_{s}}\\right) + + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) + we find the standard second order approximation: + + .. math:: + + \\hat f_{i}^{(1)}= + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + + \\mathcal{O}\\left(h^{2}\\right) + + With a similar procedure the forward/backward approximations used for + boundaries can be derived. + + References + ---------- + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + (Texts in Applied Mathematics). New York: Springer. + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + in Geophysical Fluid Dynamics. New York: Springer. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. + `PDF `_. + """ + f = np.asanyarray(f) + N = f.ndim # number of dimensions + + if axis is None: + axes = tuple(range(N)) + else: + axes = _nx.normalize_axis_tuple(axis, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and np.ndim(varargs[0]) == 0: + # single scalar for all axes + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + distances = np.asanyarray(distances) + if distances.ndim == 0: + continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError("when 1d, distances must match " + "the length of the corresponding dimension") + if np.issubdtype(distances.dtype, np.integer): + # Convert numpy integer types to float64 to avoid modular + # arithmetic in np.diff(distances). + distances = distances.astype(np.float64) + diffx = np.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. 
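+    # 1D picture of the stencils assembled below, for uniform spacing dx:
+    #     interior:             out[i]  = (f[i+1] - f[i-1]) / (2*dx)
+    #     edges (edge_order=1): out[0]  = (f[1] - f[0]) / dx
+    #                           out[-1] = (f[-1] - f[-2]) / dx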
+ + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)]*N + slice2 = [slice(None)]*N + slice3 = [slice(None)]*N + slice4 = [slice(None)]*N + + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass + else: + # All other types convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + if np.issubdtype(otype, np.integer): + f = f.astype(np.float64) + otype = np.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(f, dtype=otype) + + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2)/(dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + a.shape = b.shape = c.shape = shape + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. 
* dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + return tuple(outvals) + + + def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): + return (a, prepend, append) + + + @array_function_dispatch(_diff_dispatcher) + def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis; higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + .. versionadded:: 1.16.0 + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...]
+ 255 + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation for monotonically increasing sample points. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : optional float or complex corresponding to fp + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + + .. versionadded:: 1.10.0 + + Returns + ------- + y : float or complex (corresponding to fp) or ndarray + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + If `xp` or `fp` are not 1-D sequences + If `period == 0` + + See Also + -------- + scipy.interpolate + + Warnings + -------- + The x-coordinate sequence is expected to be increasing, but this is not + explicitly enforced. 
However, if the sequence `xp` is non-increasing, + interpolation results are meaningless. + + Note that, since NaN is unsortable, `xp` also cannot contain NaNs. + + A simple check for `xp` being strictly increasing is:: + + np.all(np.diff(xp) > 0) + + Examples + -------- + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([3. , 3. , 2.5 , 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 + + Plot an interpolant to the sine function: + + >>> x = np.linspace(0, 2*np.pi, 10) + >>> y = np.sin(x) + >>> xvals = np.linspace(0, 2*np.pi, 50) + >>> yinterp = np.interp(xvals, x, y) + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + [] + >>> plt.plot(xvals, yinterp, '-x') + [] + >>> plt.show() + + Interpolation with periodic x-coordinates: + + >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] + >>> xp = [190, -190, 350, -350] + >>> fp = [5, 10, 3, 4] + >>> np.interp(x, xp, fp, period=360) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) + + Complex interpolation: + + >>> x = [1.5, 4.0] + >>> xp = [2,3,5] + >>> fp = [1.0j, 0, 2+3j] + >>> np.interp(x, xp, fp) + array([0.+1.j , 1.+1.5j]) + + """ + + fp = np.asarray(fp) + + if np.iscomplexobj(fp): + interp_func = compiled_interp_complex + input_dtype = np.complex128 + else: + interp_func = compiled_interp + input_dtype = np.float64 + + if period is not None: + if period == 0: + raise ValueError("period must be a non-zero value") + period = abs(period) + left = None + right = None + + x = np.asarray(x, dtype=np.float64) + xp = np.asarray(xp, dtype=np.float64) + fp = np.asarray(fp, dtype=input_dtype) + + if xp.ndim != 1 or fp.ndim != 1: + raise ValueError("Data points must be 1-D sequences") + if xp.shape[0] != fp.shape[0]: + raise ValueError("fp and xp are not of the same length") + # normalizing periodic boundaries + x = x % period + xp = xp % period + asort_xp = np.argsort(xp) + xp = xp[asort_xp] + fp = fp[asort_xp] + xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) + fp = np.concatenate((fp[-1:], fp, fp[0:1])) + + return interp_func(x, xp, fp, left, right) + + +def _angle_dispatcher(z, deg=None): + return (z,) + + +@array_function_dispatch(_angle_dispatcher) +def angle(z, deg=False): + """ + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False (default). + + Returns + ------- + angle : ndarray or scalar + The counterclockwise angle from the positive real axis on the complex + plane in the range ``(-pi, pi]``, with dtype as numpy.float64. + + .. versionchanged:: 1.16.0 + This function works on subclasses of ndarray like `ma.array`. + + See Also + -------- + arctan2 + absolute + + Notes + ----- + This function passes the imaginary and real parts of the argument to + `arctan2` to compute the result; consequently, it follows the convention + of `arctan2` when the magnitude of the argument is zero. See example. + + Examples + -------- + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) # may vary + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + >>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention + array([ 0. , 3.14159265, -0. 
, -3.14159265]) + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180/pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=None, axis=-1, *, period=2*pi): + r""" + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + period : float, optional + Size of the range over which the input wraps. By default, it is + ``2 pi``. + + .. versionadded:: 1.21.0 + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. + + Examples + -------- + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary + >>> np.unwrap([0, 1, 2, -1, 0], period=4) + array([0, 1, 2, 3, 4]) + >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) + array([2, 3, 4, 5, 6, 7, 8, 9]) + >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 + >>> np.unwrap(phase_deg, period=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + """ + p = asarray(p) + nd = p.ndim + dd = diff(p, axis=axis) + if discont is None: + discont = period/2 + slice1 = [slice(None, None)]*nd # full slices + slice1[axis] = slice(1, None) + slice1 = tuple(slice1) + dtype = np.result_type(dd, period) + if _nx.issubdtype(dtype, _nx.integer): + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + ddmod = mod(dd - interval_low, period) + interval_low + if boundary_ambiguous: + # for `mask = (abs(dd) == period/2)`, the above line made + # `ddmod[mask] == -period/2`. correct these such that + # `ddmod[mask] == sign(dd[mask])*period/2`. 
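# A worked instance of the ambiguous-boundary fix below (hypothetical
# values): with period = 2*pi and a step dd = +pi, mod(dd - interval_low,
# period) wraps to interval_low == -pi, so ph_correct = ddmod - dd would
# come out as -2*pi and inject a spurious jump; copying interval_high
# (+pi) back in where dd > 0 leaves ph_correct == 0 for this
# exactly-half-period step.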
+ _nx.copyto(ddmod, interval_high, + where=(ddmod == interval_low) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype=dtype) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def _sort_complex(a): + return (a,) + + +@array_function_dispatch(_sort_complex) +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def _trim_zeros(filt, trim=None): + return (filt,) + + +@array_function_dispatch(_trim_zeros) +def trim_zeros(filt, trim='fb'): + """ + Trim the leading and/or trailing zeros from a 1-D array or sequence. + + Parameters + ---------- + filt : 1-D array or sequence + Input array. + trim : str, optional + A string with 'f' representing trim from front and 'b' to trim from + back. Default is 'fb', trim zeros from both front and back of the + array. + + Returns + ------- + trimmed : 1-D array or sequence + The result of trimming the input. The input data type is preserved. + + Examples + -------- + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, 'b') + array([0, 0, 0, ..., 0, 2, 1]) + + The input data type is preserved, list/tuple in means list/tuple out. + + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + + first = 0 + trim = trim.upper() + if 'F' in trim: + for i in filt: + if i != 0.: + break + else: + first = first + 1 + last = len(filt) + if 'B' in trim: + for i in filt[::-1]: + if i != 0.: + break + else: + last = last - 1 + return filt[first:last] + + +def _extract_dispatcher(condition, arr): + return (condition, arr) + + +@array_function_dispatch(_extract_dispatcher) +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Note that `place` does the exact opposite of `extract`. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True. 
+ + See Also + -------- + take, put, copyto, compress, place + + Examples + -------- + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]]) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def _place_dispatcher(arr, mask, vals): + return (arr, mask, vals) + + +@array_function_dispatch(_place_dispatcher) +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : ndarray + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N, it will be repeated, and if elements of `a` are to be masked, + this sequence must be non-empty. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + return _place(arr, mask, vals) + + +def disp(mesg, device=None, linefeed=True): + """ + Display a message on a device. + + .. deprecated:: 2.0 + Use your own printing function instead. + + Parameters + ---------- + mesg : str + Message to display. + device : object + Device to write message. If None, defaults to ``sys.stdout`` which is + very similar to ``print``. `device` needs to have ``write()`` and + ``flush()`` methods. + linefeed : bool, optional + Option whether to print a line feed or not. Defaults to True. + + Raises + ------ + AttributeError + If `device` does not have a ``write()`` or ``flush()`` method. + + Examples + -------- + Besides ``sys.stdout``, a file-like object can also be used as it has + both required methods: + + >>> from io import StringIO + >>> buf = StringIO() + >>> np.disp('"Display" in a file', device=buf) + >>> buf.getvalue() + '"Display" in a file\\n' + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`disp` is deprecated, " + "use your own printing function instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + if device is None: + device = sys.stdout + if linefeed: + device.write('%s\n' % mesg) + else: + device.write('%s' % mesg) + device.flush() + return + + +# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r'\w+' +_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) +_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) +_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) +_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) + + +def _parse_gufunc_signature(signature): + """ + Parse string signatures for a generalized universal function. + + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. 
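For illustration, a hedged sketch of the parsed layout (it follows from
the list/tuple construction in the body below):

>>> _parse_gufunc_signature('(m,n),(n,p)->(m,p)')
([('m', 'n'), ('n', 'p')], [('m', 'p')])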
+ + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. + """ + signature = re.sub(r'\s+', '', signature) + + if not re.match(_SIGNATURE, signature): + raise ValueError( + 'not a valid gufunc signature: {}'.format(signature)) + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. + """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape( + *broadcast_args + ) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, + results=None): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + if dtypes is None: + dtypes = [None] * len(shapes) + if results is None: + arrays = tuple(np.empty(shape=shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + else: + arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) + for result, shape, dtype + in zip(results, shapes, dtypes)) + return arrays + + +def _get_vectorize_dtype(dtype): + if dtype.char in "SU": + return dtype.char + return dtype + + +@set_module('numpy') +class vectorize: + """ + vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, + cache=False, signature=None) + + Returns an object that acts like pyfunc, but takes arrays as input. + + Define a vectorized function which takes a nested sequence of objects or + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. 
The vectorized function evaluates `pyfunc` over successive tuples + of the input arrays like the python map function, except it uses the + broadcasting rules of numpy. + + The data type of the output of `vectorized` is determined by calling + the function with the first element of the input. This can be avoided + by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable, optional + A python function or method. + Can be omitted to produce a decorator with keyword arguments. + otypes : str or list of dtypes, optional + The output data type. It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If None, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + .. versionadded:: 1.7.0 + + cache : bool, optional + If `True`, then cache the first function call that determines the number + of outputs if `otypes` is not provided. + + .. versionadded:: 1.7.0 + + signature : string, optional + Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for + vectorized matrix-vector multiplication. If provided, ``pyfunc`` will + be called with (and expected to return) arrays with shapes given by the + size of corresponding core dimensions. By default, ``pyfunc`` is + assumed to take scalars as input and output. + + .. versionadded:: 1.12.0 + + Returns + ------- + out : callable + A vectorized function if ``pyfunc`` was provided, + a decorator otherwise. + + See Also + -------- + frompyfunc : Takes an arbitrary Python function and returns a ufunc + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. The implementation is essentially a for loop. + + If `otypes` is not specified, then a call to the function with the + first argument will be used to determine the number of outputs. The + results of this call will be cached if `cache` is `True` to prevent + calling the function twice. However, to implement the cache, the + original function must be wrapped which will slow down subsequent + calls, so only do this if your function is expensive. + + The new keyword argument interface and `excluded` argument support + further degrade performance. + + References + ---------- + .. [1] :doc:`/reference/c-api/generalized-ufuncs` + + Examples + -------- + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ... return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified: + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified: + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + <class 'numpy.int64'> + >>> vfunc = np.vectorize(myfunc, otypes=[float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + <class 'numpy.float64'> + + The `excluded` argument can be used to prevent vectorizing over certain + arguments.
This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + Positional arguments may also be excluded by specifying their position: + + >>> vpolyval.excluded.add(0) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + + The `signature` argument allows for vectorizing functions that act on + non-scalar arrays of fixed length. For example, you can use it for a + vectorized calculation of Pearson correlation coefficient and its p-value: + + >>> import scipy.stats + >>> pearsonr = np.vectorize(scipy.stats.pearsonr, + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + (array([ 1., -1.]), array([ 0., 0.])) + + Or for a vectorized convolution: + + >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') + >>> convolve(np.eye(4), [1, 2, 1]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) + + Decorator syntax is supported. The decorator can be called as + a function to provide keyword arguments: + + >>> @np.vectorize + ... def identity(x): + ... return x + ... + >>> identity([0, 1, 2]) + array([0, 1, 2]) + >>> @np.vectorize(otypes=[float]) + ... def as_float(x): + ... return x + ... + >>> as_float([0, 1, 2]) + array([0., 1., 2.]) + """ + def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, + excluded=None, cache=False, signature=None): + + if (pyfunc != np._NoValue) and (not callable(pyfunc)): + #Splitting the error message to keep + #the length below 79 characters. + part1 = "When used as a decorator, " + part2 = "only accepts keyword arguments." + raise TypeError(part1 + part2) + + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'): + self.__name__ = pyfunc.__name__ + + self._ufunc = {} # Caching to improve default performance + self._doc = None + self.__doc__ = doc + if doc is None and hasattr(pyfunc, '__doc__'): + self.__doc__ = pyfunc.__doc__ + else: + self._doc = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError("Invalid otype specified: %s" % (char,)) + elif iterable(otypes): + otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def _init_stage_2(self, pyfunc, *args, **kwargs): + self.__name__ = pyfunc.__name__ + self.pyfunc = pyfunc + if self._doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = self._doc + + def _call_as_normal(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. 
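# A sketch of the bookkeeping with a hypothetical call: for
# pyfunc(a, b, c) with excluded={'b'} and vfunc(1, c=3, b=2) we get
# args = (1,) and kwargs = {'c': 3, 'b': 2}, hence names = ['c'] and
# inds = [0]; the wrapper below then receives vargs = [1, 3], writes 1
# back into the_args[0], updates kwargs['c'] = 3 and finally calls
# pyfunc(1, b=2, c=3).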
+ nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def __call__(self, *args, **kwargs): + if self.pyfunc is np._NoValue: + self._init_stage_2(*args, **kwargs) + return self + + return self._call_as_normal(*args, **kwargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + + # self._ufunc is a dictionary whose keys are the number of + # arguments (i.e. len(args)) and whose values are ufuncs created + # by frompyfunc. len(args) can be different for different calls if + # self.pyfunc has parameters with default values. We only use the + # cache when func is self.pyfunc, which occurs when the call uses + # only positional arguments and no arguments are excluded. + + nin = len(args) + nout = len(self.otypes) + if func is not self.pyfunc or nin not in self._ufunc: + ufunc = frompyfunc(func, nin, nout) + else: + ufunc = None # We'll get it from self._ufunc + if func is self.pyfunc: + ufunc = self._ufunc.setdefault(nin, ufunc) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. + # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(arg) for arg in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. + if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. 
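# Hedged illustration with a hypothetical pyfunc: if func(*inputs)
# returns the tuple (1, 2.0), the branch above sets nout = 2 and otypes
# to the concatenated dtype characters of the outputs -- e.g. 'ld' on
# platforms where the default integer is int64 and the float is float64.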
+ ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + + # Convert args to object arrays first + inputs = [asanyarray(a, dtype=object) for a in args] + + outputs = ufunc(*inputs) + + if ufunc.nout == 1: + res = asanyarray(outputs, dtype=otypes[0]) + else: + res = tuple([asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)]) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes, results) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None, *, dtype=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None, *, dtype=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. 
+ y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + + .. versionadded:: 1.5 + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + + .. versionadded:: 1.10 + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + + .. versionadded:: 1.10 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. + >>> ddof = 1 + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. + + Examples + -------- + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. 
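As a hedged check of the frequency-weight path (illustrative data),
repeating an observation is equivalent to giving it an integer
frequency weight:

>>> np.allclose(np.cov([0., 1., 1., 2.]),
...             np.cov([0., 1., 2.], fweights=[1, 2, 1]))
True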
+ + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is not None: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + + if dtype is None: + if y is None: + dtype = np.result_type(m, np.float64) + else: + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and X.shape[0] != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=None, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof*sum(w*aweights)/w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X*w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, + dtype=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, + dtype=None): + """ + Return Pearson product-moment correlation coefficients. + + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, `R`, and the + covariance matrix, `C`, is + + .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } } + + The values of `R` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. 
+ y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + R : ndarray + The correlation coefficient matrix of the variables. + + See Also + -------- + cov : Covariance matrix + + Notes + ----- + Due to floating point rounding the resulting array may not be Hermitian, + the diagonal elements may not be 1, and the elements may not satisfy the + inequality abs(a) <= 1. The real and imaginary parts are clipped to the + interval [-1, 1] in an attempt to improve on that situation but is not + much help in the complex case. + + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + + Examples + -------- + In this example we generate two random arrays, ``xarr`` and ``yarr``, and + compute the row-wise and column-wise Pearson correlation coefficients, + ``R``. Since ``rowvar`` is true by default, we first find the row-wise + Pearson correlation coefficients between the variables of ``xarr``. + + >>> import numpy as np + >>> rng = np.random.default_rng(seed=42) + >>> xarr = rng.random((3, 3)) + >>> xarr + array([[0.77395605, 0.43887844, 0.85859792], + [0.69736803, 0.09417735, 0.97562235], + [0.7611397 , 0.78606431, 0.12811363]]) + >>> R1 = np.corrcoef(xarr) + >>> R1 + array([[ 1. , 0.99256089, -0.68080986], + [ 0.99256089, 1. , -0.76492172], + [-0.68080986, -0.76492172, 1. ]]) + + If we add another set of variables and observations ``yarr``, we can + compute the row-wise Pearson correlation coefficients between the + variables in ``xarr`` and ``yarr``. + + >>> yarr = rng.random((3, 3)) + >>> yarr + array([[0.45038594, 0.37079802, 0.92676499], + [0.64386512, 0.82276161, 0.4434142 ], + [0.22723872, 0.55458479, 0.06381726]]) + >>> R2 = np.corrcoef(xarr, yarr) + >>> R2 + array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , + -0.99004057], + [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098, + -0.99981569], + [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, + 0.77714685], + [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, + -0.83571711], + [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. , + 0.97517215], + [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, + 1. ]]) + + Finally if we use the option ``rowvar=False``, the columns are now + being treated as the variables and we will find the column-wise Pearson + correlation coefficients between variables in ``xarr`` and ``yarr``. + + >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) + >>> R3 + array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , + 0.22423734], + [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, + -0.44069024], + [-0.47458546, -0.92346708, 1. 
, 0.93773029, 0.23297648, + 0.75137473], + [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, + 0.47536961], + [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , + -0.46666491], + [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, + 1. ]]) + + """ + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn('bias and ddof have no effect and are deprecated', + DeprecationWarning, stacklevel=2) + c = cov(x, y, rowvar, dtype=dtype) + try: + d = diag(c) + except ValueError: + # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + stddev = sqrt(d.real) + c /= stddev[:, None] + c /= stddev[None, :] + + # Clip real and imaginary parts to [-1, 1]. This does not guarantee + # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without + # excessive work. + np.clip(c.real, -1, 1, out=c.real) + if np.iscomplexobj(c): + np.clip(c.imag, -1, 1, out=c.imag) + + return c + + +@set_module('numpy') +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> np.blackman(12) + array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) + + Plot the window and the frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.blackman(51) + plt.plot(window) + plt.title("Blackman window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() # doctest: +SKIP + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Blackman window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) + + +@set_module('numpy') +def bartlett(M): + """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, with the maximum value normalized to one + (the value one appears only if the number of samples is odd), with + the first and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \\frac{2}{M-1} \\left( + \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| + \\right) + + Most references to the Bartlett window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. Note that convolution with this window produces linear + interpolation. It is also known as an apodization (which means "removing + the foot", i.e. smoothing discontinuities at the beginning and end of the + sampled signal) or tapering function. The Fourier transform of the + Bartlett window is the product of two sinc functions. Note the excellent + discussion in Kanasewich [2]_. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> np.bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. ]) + + Plot the window and its frequency response (requires SciPy and matplotlib). + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.bartlett(51) + plt.plot(window) + plt.title("Bartlett window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Bartlett window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) + + +@set_module('numpy') +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius von Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> np.hanning(12) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hanning(51) + plt.plot(window) + plt.title("Hann window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of the Hann window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return 0.5 + 0.5*cos(pi*n/(M-1)) + + +@set_module('numpy') +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hamming(51) + plt.plot(window) + plt.title("Hamming window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Hamming window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return 0.54 + 0.46*cos(pi*n/(M-1)) + + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x*b1 - b2 + vals[i] + + return 0.5*(b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x/2.0-2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. + + Parameters + ---------- + x : array_like of float + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = float + The modified Bessel function evaluated at each of the elements of `x`. 
+ + See Also + -------- + scipy.special.i0, scipy.special.iv, scipy.special.ive + + Notes + ----- + The scipy implementation is recommended over this function: it is a + proper ufunc written in C, and more than an order of magnitude faster. + + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + https://personal.math.ubc.ca/~cbm/aands/page_379.htm + .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + + Examples + -------- + >>> np.i0(0.) + array(1.0) + >>> np.i0([0, 1, 2, 3]) + array([1. , 1.26606588, 2.2795853 , 4.88079259]) + + """ + x = np.asanyarray(x) + if x.dtype.kind == 'c': + raise TypeError("i0 not supported for complex values") + if x.dtype.kind != 'f': + x = x.astype(float) + x = np.abs(x) + return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. 
smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> np.kaiser(12, 14) + array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + + + Plot the window and the frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.kaiser(51, 14) + plt.plot(window) + plt.title("Kaiser window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Kaiser window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. (Simplified result_type with 0.0 + # strongly typed. result-type is not/less order sensitive, but that mainly + # matters for integers anyway.) + values = np.array([0.0, M, beta]) + M = values[1] + beta = values[2] + + if M == 1: + return np.ones(1, dtype=values.dtype) + n = arange(0, M) + alpha = (M-1)/2.0 + return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta) + + +def _sinc_dispatcher(x): + return (x,) + + +@array_function_dispatch(_sinc_dispatcher) +def sinc(x): + r""" + Return the normalized sinc function. + + The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument + :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not + only everywhere continuous but also infinitely differentiable. + + .. note:: + + Note the normalization factor of ``pi`` used in the definition. + This is the most commonly used definition in signal processing. + Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function + :math:`\sin(x)/x` that is more common in mathematics. + + Parameters + ---------- + x : ndarray + Array (possibly multi-dimensional) of values for which to calculate + ``sinc(x)``. + + Returns + ------- + out : ndarray + ``sinc(x)``, which has the same shape as the input. + + Notes + ----- + The name sinc is short for "sine cardinal" or "sinus cardinalis". + + The sinc function is used in various signal processing applications, + including in anti-aliasing, in the construction of a Lanczos resampling + filter, and in interpolation. + + For bandlimited interpolation of discrete-time signals, the ideal + interpolation kernel is proportional to the sinc function. + + References + ---------- + .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web + Resource. https://mathworld.wolfram.com/SincFunction.html + .. 
[2] Wikipedia, "Sinc function", + https://en.wikipedia.org/wiki/Sinc_function + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-4, 4, 41) + >>> np.sinc(x) + array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary + -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, + -4.92362781e-02, -3.89804309e-17]) + + >>> plt.plot(x, np.sinc(x)) + [] + >>> plt.title("Sinc Function") + Text(0.5, 1.0, 'Sinc Function') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("X") + Text(0.5, 0, 'X') + >>> plt.show() + + """ + x = np.asanyarray(x) + y = pi * where(x == 0, 1.0e-20, x) + return sin(y)/y + + +def _ureduce(a, func, keepdims=False, **kwargs): + """ + Internal Function. + Call `func` with `a` as first argument swapping the axes to use extended + axis on functions that don't support it natively. + + Returns result and a.shape with axis dims set to 1. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + func : callable + Reduction function capable of receiving a single axis argument. + It is called with `a` as first argument followed by `kwargs`. + kwargs : keyword arguments + additional keyword arguments to pass to `func`. + + Returns + ------- + result : tuple + Result of func(a, **kwargs) and a.shape with axis dims set to 1 + which can be used to reshape the result to the same shape a ufunc with + keepdims=True would produce. + + """ + a = np.asanyarray(a) + axis = kwargs.get('axis', None) + out = kwargs.get('out', None) + + if keepdims is np._NoValue: + keepdims = False + + nd = a.ndim + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, nd) + + if keepdims: + if out is not None: + index_out = tuple( + 0 if i in axis else slice(None) for i in range(nd)) + kwargs['out'] = out[(Ellipsis, ) + index_out] + + if len(axis) == 1: + kwargs['axis'] = axis[0] + else: + keep = set(range(nd)) - set(axis) + nkeep = len(keep) + # swap axis that should not be reduced to front + for i, s in enumerate(sorted(keep)): + a = a.swapaxes(i, s) + # merge reduced axis + a = a.reshape(a.shape[:nkeep] + (-1,)) + kwargs['axis'] = -1 + else: + if keepdims: + if out is not None: + index_out = (0, ) * nd + kwargs['out'] = out[(Ellipsis, ) + index_out] + + r = func(a, **kwargs) + + if out is not None: + return out + + if keepdims: + if axis is None: + index_r = (np.newaxis, ) * nd + else: + index_r = tuple( + np.newaxis if i in axis else slice(None) + for i in range(nd)) + r = r[(Ellipsis, ) + index_r] + + return r + + +def _median_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_median_dispatcher) +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. 
+ axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default, + axis=None, will compute the median along a flattened version of + the array. + + .. versionadded:: 1.9.0 + + If a sequence of axes, the array is first flattened along the + given axes, then the median is computed along the resulting + flattened axis. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original array `a`. + + .. versionadded:: 1.9.0 + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the + two middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.median(a) + np.float64(3.5) + >>> np.median(a, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.median(a, axis=1) + array([7., 2.]) + >>> np.median(a, axis=(0, 1)) + np.float64(3.5) + >>> m = np.median(a, axis=0) + >>> out = np.zeros_like(m) + >>> np.median(a, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> out + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.median(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.median(b, axis=None, overwrite_input=True) + np.float64(3.5) + >>> assert not np.all(a==b) + + """ + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + + def _median(a, axis=None, out=None, overwrite_input=False): + # can't reasonably be implemented in terms of percentile as we have to + # call mean to not break astropy + a = np.asanyarray(a) + + # Set the partition indexes + if axis is None: + sz = a.size + else: + sz = a.shape[axis] + if sz % 2 == 0: + szh = sz // 2 + kth = [szh - 1, szh] + else: + kth = [(sz - 1) // 2] + + # We have to check for NaNs (as of writing 'M' doesn't actually work).
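+ # When NaNs are possible, index -1 is also partitioned (kth.append below): + # NaNs sort to the end of the array, so forcing the last element into place + # lets _median_nancheck detect them cheaply via part[-1].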
+ supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm' + if supports_nans: + kth.append(-1) + + if overwrite_input: + if axis is None: + part = a.ravel() + part.partition(kth) + else: + a.partition(kth, axis=axis) + part = a + else: + part = partition(a, kth, axis=axis) + + if part.shape == (): + # make 0-D arrays work + return part.item() + if axis is None: + axis = 0 + + indexer = [slice(None)] * part.ndim + index = part.shape[axis] // 2 + if part.shape[axis] % 2 == 1: + # index with slice to allow mean (below) to work + indexer[axis] = slice(index, index+1) + else: + indexer[axis] = slice(index-1, index+1) + indexer = tuple(indexer) + + # Use mean in both odd and even case to coerce data type, + # using out array if needed. + rout = mean(part[indexer], axis=axis, out=out) + if supports_nans and sz > 0: + # If nans are possible, warn and replace by nans like mean would. + rout = np.lib._utils_impl._median_nancheck(part, rout, axis) + + return rout + + +def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_percentile_dispatcher) +def percentile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None, + interpolation=None): + """ + Compute the q-th percentile of the data along the specified axis. + + Returns the q-th percentile(s) of the array elements. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Percentage or sequence of percentages for the percentiles to compute. + Values must be between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + + .. versionchanged:: 1.9.0 + A tuple of axes is supported + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + .. 
versionadded:: 1.9.0 + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile + quantile : equivalent to percentile, except q in the range [0, 1]. + + Notes + ----- + In general, the percentile at percentage level :math:`q` of a cumulative + distribution function :math:`F(y)=P(Y \\leq y)` with probability measure + :math:`P` is defined as any number :math:`x` that fulfills the + *coverage conditions* + + .. math:: P(Y < x) \\leq q/100 \\quad\\text{and} + \\quad P(Y \\leq x) \\geq q/100 + + with random variable :math:`Y\\sim P`. + Sample percentiles, the result of ``percentile``, provide nonparametric + estimation of the underlying population counterparts, represented by the + unknown :math:`F`, given a data vector ``a`` of length ``n``. + + One type of estimators arises when one considers :math:`F` as the empirical + distribution function of the data, i.e. + :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. + Then, different methods correspond to different choices of :math:`x` that + fulfill the above inequalities. Methods that follow this approach are + ``inverted_cdf`` and ``averaged_inverted_cdf``. + + A more general way to define sample percentile estimators is as follows. + The empirical q-percentile of ``a`` is the ``n * q/100``-th value of the + way from the minimum to the maximum in a sorted copy of ``a``. The values + and distances of the two nearest neighbors as well as the `method` + parameter will determine the percentile if the normalized ranking does not + match the location of ``n * q/100`` exactly. This function is the same as + the median if ``q=50``, the same as the minimum if ``q=0`` and the same + as the maximum if ``q=100``. + + The optional `method` parameter specifies the method to use when the + desired percentile lies between two indexes ``i`` and ``j = i + 1``. + In that case, we first determine ``i + g``, a virtual index that lies + between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the + fractional part of the index. The final result is, then, an interpolation + of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, + ``i`` and ``j`` are modified using correction constants ``alpha`` and + ``beta`` whose choices depend on the ``method`` used. 
Finally, note that + since Python uses 0-based indexing, the code subtracts another 1 from the + index internally. + + The following formula determines the virtual index ``i + g``, the location + of the percentile in the sorted sample: + + .. math:: + i + g = (q / 100) * ( n - alpha - beta + 1 ) + alpha + + The different methods then work as follows + + inverted_cdf: + method 1 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 ; then take i + + averaged_inverted_cdf: + method 2 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 ; then average between bounds + + closest_observation: + method 3 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 and index is odd ; then take j + * if g = 0 and index is even ; then take i + + interpolated_inverted_cdf: + method 4 of H&F [1]_. + This method gives continuous results using: + + * alpha = 0 + * beta = 1 + + hazen: + method 5 of H&F [1]_. + This method gives continuous results using: + + * alpha = 1/2 + * beta = 1/2 + + weibull: + method 6 of H&F [1]_. + This method gives continuous results using: + + * alpha = 0 + * beta = 0 + + linear: + method 7 of H&F [1]_. + This method gives continuous results using: + + * alpha = 1 + * beta = 1 + + median_unbiased: + method 8 of H&F [1]_. + This method is probably the best method if the sample + distribution function is unknown (see reference). + This method gives continuous results using: + + * alpha = 1/3 + * beta = 1/3 + + normal_unbiased: + method 9 of H&F [1]_. + This method is probably the best method if the sample + distribution function is known to be normal. + This method gives continuous results using: + + * alpha = 3/8 + * beta = 3/8 + + lower: + NumPy method kept for backwards compatibility. + Takes ``i`` as the interpolation point. + + higher: + NumPy method kept for backwards compatibility. + Takes ``j`` as the interpolation point. + + nearest: + NumPy method kept for backwards compatibility. + Takes ``i`` or ``j``, whichever is nearest. + + midpoint: + NumPy method kept for backwards compatibility. + Uses ``(i + j) / 2``. + + For weighted percentiles, the above coverage conditions still hold. The + empirical cumulative distribution is simply replaced by its weighted + version, i.e. + :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. + Only ``method="inverted_cdf"`` supports weights. + + + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 50) + 3.5 + >>> np.percentile(a, 50, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.percentile(a, 50, axis=1) + array([7., 2.]) + >>> np.percentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + + >>> m = np.percentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 50, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + + >>> b = a.copy() + >>> np.percentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + The different methods can be visualized graphically: + + .. 
plot:: + + import matplotlib.pyplot as plt + + a = np.arange(4) + p = np.linspace(0, 100, 6001) + ax = plt.gca() + lines = [ + ('linear', '-', 'C0'), + ('inverted_cdf', ':', 'C1'), + # Almost the same as `inverted_cdf`: + ('averaged_inverted_cdf', '-.', 'C1'), + ('closest_observation', ':', 'C2'), + ('interpolated_inverted_cdf', '--', 'C1'), + ('hazen', '--', 'C3'), + ('weibull', '-.', 'C4'), + ('median_unbiased', '--', 'C5'), + ('normal_unbiased', '-.', 'C6'), + ] + for method, style, color in lines: + ax.plot( + p, np.percentile(a, p, method=method), + label=method, linestyle=style, color=color) + ax.set( + title='Percentiles for different methods and data: ' + str(a), + xlabel='Percentile', + ylabel='Estimated percentile value', + yticks=a) + ax.legend(bbox_to_anchor=(1.03, 1)) + plt.tight_layout() + plt.show() + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "percentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float) + # by making the divisor have the dtype of the data array. + q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100) + q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) + if not _quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_quantile_dispatcher) +def quantile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None, + interpolation=None): + """ + Compute the q-th quantile of the data along the specified axis. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Probability or sequence of probabilities for the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The default is + to compute the quantile(s) along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. 
+ method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis + of the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + percentile : equivalent to quantile, but with q in the range [0, 100]. + median : equivalent to ``quantile(..., 0.5)`` + nanquantile + + Notes + ----- + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. 
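+ + As a quick numerical check of these formulas (an illustrative sketch added + here, not part of the reference), the default ``linear`` convention with + ``m = 1 - q`` (see the table below) reproduces ``np.quantile``: + + >>> import numpy as np + >>> y = np.array([1.0, 2.0, 4.0, 8.0]) # sorted sample, n = 4 + >>> q, m = 0.25, 1 - 0.25 # 'linear' uses m = 1 - q + >>> j = int((q * 4 + m - 1) // 1) # j = 0 + >>> g = (q * 4 + m - 1) % 1 # g = 0.75 + >>> float((1 - g) * y[j] + g * y[j + 1]) + 1.75 + >>> float(np.quantile(y, q)) + 1.75 +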
+ The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``g = 1 - int((index == j) & (j%2 == 1))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. + + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative + distribution function :math:`F(y)=P(Y \\leq y)` with probability measure + :math:`P` is defined as any number :math:`x` that fulfills the + *coverage conditions* + + .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q + + with random variable :math:`Y\\sim P`. + Sample quantiles, the result of ``quantile``, provide nonparametric + estimation of the underlying population counterparts, represented by the + unknown :math:`F`, given a data vector ``a`` of length ``n``. + + One type of estimators arises when one considers :math:`F` as the empirical + distribution function of the data, i.e. + :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. + Then, different methods correspond to different choices of :math:`x` that + fulfill the above inequalities. Methods that follow this approach are + ``inverted_cdf`` and ``averaged_inverted_cdf``. + + A more general way to define sample quantile estimators is as follows. + The empirical q-quantile of ``a`` is the ``n * q``-th value of the + way from the minimum to the maximum in a sorted copy of ``a``. The values + and distances of the two nearest neighbors as well as the `method` + parameter will determine the quantile if the normalized ranking does not + match the location of ``n * q`` exactly. This function is the same as + the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the same + as the maximum if ``q=1.0``. + + The optional `method` parameter specifies the method to use when the + desired quantile lies between two indexes ``i`` and ``j = i + 1``. 
+ In that case, we first determine ``i + g``, a virtual index that lies + between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the + fractional part of the index. The final result is, then, an interpolation + of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, + ``i`` and ``j`` are modified using correction constants ``alpha`` and + ``beta`` whose choices depend on the ``method`` used. Finally, note that + since Python uses 0-based indexing, the code subtracts another 1 from the + index internally. + + The following formula determines the virtual index ``i + g``, the location + of the quantile in the sorted sample: + + .. math:: + i + g = q * ( n - alpha - beta + 1 ) + alpha + + The different methods then work as follows + + inverted_cdf: + method 1 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 ; then take i + + averaged_inverted_cdf: + method 2 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 ; then average between bounds + + closest_observation: + method 3 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 and index is odd ; then take j + * if g = 0 and index is even ; then take i + + interpolated_inverted_cdf: + method 4 of H&F [1]_. + This method gives continuous results using: + + * alpha = 0 + * beta = 1 + + hazen: + method 5 of H&F [1]_. + This method gives continuous results using: + + * alpha = 1/2 + * beta = 1/2 + + weibull: + method 6 of H&F [1]_. + This method gives continuous results using: + + * alpha = 0 + * beta = 0 + + linear: + method 7 of H&F [1]_. + This method gives continuous results using: + + * alpha = 1 + * beta = 1 + + median_unbiased: + method 8 of H&F [1]_. + This method is probably the best method if the sample + distribution function is unknown (see reference). + This method gives continuous results using: + + * alpha = 1/3 + * beta = 1/3 + + normal_unbiased: + method 9 of H&F [1]_. + This method is probably the best method if the sample + distribution function is known to be normal. + This method gives continuous results using: + + * alpha = 3/8 + * beta = 3/8 + + lower: + NumPy method kept for backwards compatibility. + Takes ``i`` as the interpolation point. + + higher: + NumPy method kept for backwards compatibility. + Takes ``j`` as the interpolation point. + + nearest: + NumPy method kept for backwards compatibility. + Takes ``i`` or ``j``, whichever is nearest. + + midpoint: + NumPy method kept for backwards compatibility. + Uses ``(i + j) / 2``. + + **Weighted quantiles:** + For weighted quantiles, the above coverage conditions still hold. The + empirical cumulative distribution is simply replaced by its weighted + version, i.e. + :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. + Only ``method="inverted_cdf"`` supports weights. 
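+ + As a minimal weighted illustration (a sketch added here, not from the + reference; scalar formatting can vary by NumPy version), tripling one + sample's weight acts like repeating that sample three times: + + >>> import numpy as np + >>> a = np.array([1.0, 2.0, 3.0, 4.0]) + >>> float(np.quantile(a, 0.5, method="inverted_cdf", weights=[1, 1, 1, 1])) + 2.0 + >>> float(np.quantile(a, 0.5, method="inverted_cdf", weights=[1, 1, 1, 3])) + 3.0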
+ + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.quantile(a, 0.5) + 3.5 + >>> np.quantile(a, 0.5, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.quantile(a, 0.5, axis=1) + array([7., 2.]) + >>> np.quantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.quantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.quantile(a, 0.5, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + See also `numpy.percentile` for a visualization of most methods. + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "quantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float). + if isinstance(q, (int, float)) and a.dtype.kind == "f": + q = np.asanyarray(q, dtype=a.dtype) + else: + q = np.asanyarray(q) + + if not _quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _quantile_unchecked(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + weights=None): + """Assumes that q is in [0, 1], and is an ndarray""" + return _ureduce(a, + func=_quantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _quantile_is_valid(q): + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if not (0.0 <= q[i] <= 1.0): + return False + else: + if not (q.min() >= 0 and q.max() <= 1): + return False + return True + + +def _check_interpolation_as_method(method, interpolation, fname): + # Deprecated NumPy 1.22, 2021-11-08 + warnings.warn( + f"the `interpolation=` argument to {fname} was renamed to " + "`method=`, which has additional options.\n" + "Users of the modes 'nearest', 'lower', 'higher', or " + "'midpoint' are encouraged to review the method they used. " + "(Deprecated NumPy 1.22)", + DeprecationWarning, stacklevel=4) + if method != "linear": + # sanity check, we assume this basically never happens + raise TypeError( + "You shall not pass both `method` and `interpolation`!\n" + "(`interpolation` is Deprecated in favor of `method`)") + return interpolation + + +def _compute_virtual_index(n, quantiles, alpha: float, beta: float): + """ + Compute the floating point indexes of an array for the linear + interpolation of quantiles. + n : array_like + The sample sizes. + quantiles : array_like + The quantiles values. + alpha : float + A constant used to correct the index computed. 
+ beta : float + A constant used to correct the index computed. + + alpha and beta values depend on the chosen method + (see quantile documentation) + + Reference: + Hyndman&Fan paper "Sample Quantiles in Statistical Packages", + DOI: 10.1080/00031305.1996.10473566 + """ + return n * quantiles + ( + alpha + quantiles * (1 - alpha - beta) + ) - 1 + + + def _get_gamma(virtual_indexes, previous_indexes, method): + """ + Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation + of quantiles. + + virtual_indexes : array_like + The indexes where the percentile is supposed to be found in the sorted + sample. + previous_indexes : array_like + The floor values of virtual_indexes. + method : dict + The interpolation method chosen, which may have a specific rule + modifying gamma. + + gamma is usually the fractional part of virtual_indexes but can be modified + by the interpolation method. + """ + gamma = np.asanyarray(virtual_indexes - previous_indexes) + gamma = method["fix_gamma"](gamma, virtual_indexes) + # Ensure both that we have an array, and that we keep the dtype + # (which may have been matched to the input array). + return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + + + def _lerp(a, b, t, out=None): + """ + Compute the linear interpolation weighted by gamma on each point of + two same-shape arrays. + + a : array_like + Left bound. + b : array_like + Right bound. + t : array_like + The interpolation weight. + out : array_like + Output array. + """ + diff_b_a = subtract(b, a) + # asanyarray is a stop-gap until gh-13105 + lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, + casting='unsafe', dtype=type(lerp_interpolation.dtype)) + if lerp_interpolation.ndim == 0 and out is None: + lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + return lerp_interpolation + + + def _get_gamma_mask(shape, default_value, conditioned_value, where): + out = np.full(shape, default_value) + np.copyto(out, conditioned_value, where=where, casting="unsafe") + return out + + + def _discret_interpolation_to_boundaries(index, gamma_condition_fun): + previous = np.floor(index) + next = previous + 1 + gamma = index - previous + res = _get_gamma_mask(shape=index.shape, + default_value=next, + conditioned_value=previous, + where=gamma_condition_fun(gamma, index) + ).astype(np.intp) + # Some methods can lead to out-of-bound integers, clip them: + res[res < 0] = 0 + return res + + + def _closest_observation(n, quantiles): + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) + return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) + + + def _inverted_cdf(n, quantiles): + gamma_fun = lambda gamma, _: (gamma == 0) + return _discret_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) + + + def _quantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int = None, + out=None, + overwrite_input: bool = False, + method="linear", + ) -> np.array: + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as it was + # before.
+ raise ValueError("q must be a scalar or 1d") + if overwrite_input: + if axis is None: + axis = 0 + arr = a.ravel() + wgt = None if weights is None else weights.ravel() + else: + arr = a + wgt = weights + else: + if axis is None: + axis = 0 + arr = a.flatten() + wgt = None if weights is None else weights.flatten() + else: + arr = a.copy() + wgt = weights + result = _quantile(arr, + quantiles=q, + axis=axis, + method=method, + out=out, + weights=wgt) + return result + + +def _get_indexes(arr, virtual_indexes, valid_values_count): + """ + Get the valid indexes of arr neighbouring virtual_indexes. + Note + This is a companion function to linear interpolation of + Quantiles + + Returns + ------- + (previous_indexes, next_indexes): Tuple + A Tuple of virtual_indexes neighbouring indexes + """ + previous_indexes = np.asanyarray(np.floor(virtual_indexes)) + next_indexes = np.asanyarray(previous_indexes + 1) + indexes_above_bounds = virtual_indexes >= valid_values_count - 1 + # When indexes is above max index, take the max value of the array + if indexes_above_bounds.any(): + previous_indexes[indexes_above_bounds] = -1 + next_indexes[indexes_above_bounds] = -1 + # When indexes is below min index, take the min value of the array + indexes_below_bounds = virtual_indexes < 0 + if indexes_below_bounds.any(): + previous_indexes[indexes_below_bounds] = 0 + next_indexes[indexes_below_bounds] = 0 + if np.issubdtype(arr.dtype, np.inexact): + # After the sort, slices having NaNs will have for last element a NaN + virtual_indexes_nans = np.isnan(virtual_indexes) + if virtual_indexes_nans.any(): + previous_indexes[virtual_indexes_nans] = -1 + next_indexes[virtual_indexes_nans] = -1 + previous_indexes = previous_indexes.astype(np.intp) + next_indexes = next_indexes.astype(np.intp) + return previous_indexes, next_indexes + + +def _quantile( + arr: np.array, + quantiles: np.array, + axis: int = -1, + method="linear", + out=None, + weights=None, +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + It computes the quantiles of the array for the given axis. + A linear interpolation is performed based on the `interpolation`. + + By default, the method is "linear" where alpha == beta == 1 which + performs the 7th method of Hyndman&Fan. + With "median_unbiased" we get alpha == beta == 1/3 + thus the 8th method of Hyndman&Fan. + """ + # --- Setup + arr = np.asanyarray(arr) + values_count = arr.shape[axis] + # The dimensions of `q` are prepended to the output shape, so we need the + # axis being sampled from `arr` to be last. + if axis != 0: # But moveaxis is slow, so only call it if necessary. + arr = np.moveaxis(arr, axis, destination=0) + supports_nans = ( + np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm' + ) + + if weights is None: + # --- Computation of indexes + # Index where to find the value in the sorted array. + # Virtual because it is a floating point value, not an valid index. + # The nearest neighbours are used for interpolation + try: + method_props = _QuantileMethods[method] + except KeyError: + raise ValueError( + f"{method!r} is not a valid method. 
Use one of: " + f"{_QuantileMethods.keys()}") from None + virtual_indexes = method_props["get_virtual_index"](values_count, + quantiles) + virtual_indexes = np.asanyarray(virtual_indexes) + + if method_props["fix_gamma"] is None: + supports_integers = True + else: + int_virtual_indices = np.issubdtype(virtual_indexes.dtype, + np.integer) + supports_integers = method == 'linear' and int_virtual_indices + + if supports_integers: + # No interpolation needed, take the points along axis + if supports_nans: + # may contain nan, which would sort to the end + arr.partition( + concatenate((virtual_indexes.ravel(), [-1])), axis=0, + ) + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + arr.partition(virtual_indexes.ravel(), axis=0) + slices_having_nans = np.array(False, dtype=bool) + result = take(arr, virtual_indexes, axis=0, out=out) + else: + previous_indexes, next_indexes = _get_indexes(arr, + virtual_indexes, + values_count) + # --- Sorting + arr.partition( + np.unique(np.concatenate(([0, -1], + previous_indexes.ravel(), + next_indexes.ravel(), + ))), + axis=0) + if supports_nans: + slices_having_nans = np.isnan(arr[-1, ...]) + else: + slices_having_nans = None + # --- Get values from indexes + previous = arr[previous_indexes] + next = arr[next_indexes] + # --- Linear interpolation + gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) + result = _lerp(previous, + next, + gamma, + out=out) + else: + # Weighted case + # This implements method="inverted_cdf", the only supported weighted + # method, which needs to sort anyway. + weights = np.asanyarray(weights) + if axis != 0: + weights = np.moveaxis(weights, axis, destination=0) + index_array = np.argsort(arr, axis=0, kind="stable") + + # arr = arr[index_array, ...] # but this adds trailing dimensions of + # 1. + arr = np.take_along_axis(arr, index_array, axis=0) + if weights.shape == arr.shape: + weights = np.take_along_axis(weights, index_array, axis=0) + else: + # weights is 1d + weights = weights.reshape(-1)[index_array, ...] + + if supports_nans: + # may contain nan, which would sort to the end + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + slices_having_nans = np.array(False, dtype=bool) + + # We use the weights to calculate the empirical cumulative + # distribution function cdf + cdf = weights.cumsum(axis=0, dtype=np.float64) + cdf /= cdf[-1, ...] # normalization to 1 + # Search index i such that + # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) + # is then equivalent to + # cdf[i-1] < quantile <= cdf[i] + # Unfortunately, searchsorted only accepts 1-d arrays as first + # argument, so we will need to iterate over dimensions. + + # Without the following cast, searchsorted can return surprising + # results, e.g. + # np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]), + # np.array(0.4, dtype=np.float32), side="left") + # returns 2 instead of 1 because 0.4 is not binary representable. + if quantiles.dtype.kind == "f": + cdf = cdf.astype(quantiles.dtype) + + def find_cdf_1d(arr, cdf): + indices = np.searchsorted(cdf, quantiles, side="left") + # We might have reached the maximum with i = len(arr), e.g. for + # quantiles = 1, and need to cut it to len(arr) - 1. 
+ indices = minimum(indices, values_count - 1) + result = take(arr, indices, axis=0) + return result + + r_shape = arr.shape[1:] + if quantiles.ndim > 0: + r_shape = quantiles.shape + r_shape + if out is None: + result = np.empty_like(arr, shape=r_shape) + else: + if out.shape != r_shape: + msg = (f"Wrong shape of argument 'out', shape={r_shape} is " + f"required; got shape={out.shape}.") + raise ValueError(msg) + result = out + + # See apply_along_axis, which we do for axis=0. Note that Ni = (,) + # always, so we remove it here. + Nk = arr.shape[1:] + for kk in np.ndindex(Nk): + result[(...,) + kk] = find_cdf_1d( + arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk] + ) + + # Make result the same as in unweighted inverted_cdf. + if result.shape == () and result.dtype == np.dtype("O"): + result = result.item() + + if np.any(slices_having_nans): + if result.ndim == 0 and out is None: + # can't write to a scalar, but indexing will be correct + result = arr[-1] + else: + np.copyto(result, arr[-1, ...], where=slices_having_nans) + return result + + +def _trapezoid_dispatcher(y, x=None, dx=None, axis=None): + return (y, x) + + +@array_function_dispatch(_trapezoid_dispatcher) +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + .. versionadded:: 2.0.0 + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. [2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + Use the trapezoidal rule on evenly spaced points: + + >>> np.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> np.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> np.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> np.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. 
We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> np.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> np.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``np.trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> np.trapezoid(a, axis=1) + array([2., 8.]) + """ + + y = asanyarray(y) + if x is None: + d = dx + else: + x = asanyarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) + return ret + + +@set_module('numpy') +def trapz(y, x=None, dx=1.0, axis=-1): + """ + `trapz` is deprecated in NumPy 2.0. + + Please use `trapezoid` instead, or one of the numerical integration + functions in `scipy.integrate`. + """ + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`trapz` is deprecated. Use `trapezoid` instead, or one of the " + "numerical integration functions in `scipy.integrate`.", + DeprecationWarning, + stacklevel=2 + ) + return trapezoid(y, x=x, dx=dx, axis=axis) + + +def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): + return xi + + +# Based on scitools meshgrid +@array_function_dispatch(_meshgrid_dispatcher) +def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): + """ + Return a tuple of coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of + N-D scalar/vector fields over N-D grids, given + one-dimensional coordinate arrays x1, x2,..., xn. + + .. versionchanged:: 1.9 + 1-D and 0-D cases are allowed. + + Parameters + ---------- + x1, x2,..., xn : array_like + 1-D arrays representing the coordinates of a grid. + indexing : {'xy', 'ij'}, optional + Cartesian ('xy', default) or matrix ('ij') indexing of output. + See Notes for more details. + + .. versionadded:: 1.7.0 + sparse : bool, optional + If True the shape of the returned coordinate array for dimension *i* + is reduced from ``(N1, ..., Ni, ... Nn)`` to + ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are + intended to be use with :ref:`basics.broadcasting`. When all + coordinates are used in an expression, broadcasting still leads to a + fully-dimensonal result array. + + Default is False. + + .. versionadded:: 1.7.0 + copy : bool, optional + If False, a view into the original arrays are returned in order to + conserve memory. Default is True. Please note that + ``sparse=False, copy=False`` will likely return non-contiguous + arrays. Furthermore, more than one element of a broadcast array + may refer to a single memory location. If you need to write to the + arrays, make copies first. + + .. 
versionadded:: 1.7.0 + + Returns + ------- + X1, X2,..., XN : tuple of ndarrays + For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, + returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij' + or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy' + with the elements of `xi` repeated to fill the matrix along + the first dimension for `x1`, the second for `x2` and so on. + + Notes + ----- + This function supports both indexing conventions through the indexing + keyword argument. Giving the string 'ij' returns a meshgrid with + matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. + In the 2-D case with inputs of length M and N, the outputs are of shape + (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case + with inputs of length M, N and P, outputs are of shape (N, M, P) for + 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is + illustrated by the following code snippet:: + + xv, yv = np.meshgrid(x, y, indexing='ij') + for i in range(nx): + for j in range(ny): + # treat xv[i,j], yv[i,j] + + xv, yv = np.meshgrid(x, y, indexing='xy') + for i in range(nx): + for j in range(ny): + # treat xv[j,i], yv[j,i] + + In the 1-D and 0-D case, the indexing and sparse keywords have no effect. + + See Also + -------- + mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. + ogrid : Construct an open multi-dimensional "meshgrid" using indexing + notation. + :ref:`how-to-index` + + Examples + -------- + >>> nx, ny = (3, 2) + >>> x = np.linspace(0, 1, nx) + >>> y = np.linspace(0, 1, ny) + >>> xv, yv = np.meshgrid(x, y) + >>> xv + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) + >>> yv + array([[0., 0., 0.], + [1., 1., 1.]]) + + The result of `meshgrid` is a coordinate grid: + + >>> import matplotlib.pyplot as plt + >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none') + >>> plt.show() + + You can create sparse output arrays to save memory and computation time. + + >>> xv, yv = np.meshgrid(x, y, sparse=True) + >>> xv + array([[0. , 0.5, 1. ]]) + >>> yv + array([[0.], + [1.]]) + + `meshgrid` is very useful to evaluate functions on a grid. If the + function depends on all coordinates, both dense and sparse outputs can be + used. 
+ + >>> x = np.linspace(-5, 5, 101) + >>> y = np.linspace(-5, 5, 101) + >>> # full coordinate arrays + >>> xx, yy = np.meshgrid(x, y) + >>> zz = np.sqrt(xx**2 + yy**2) + >>> xx.shape, yy.shape, zz.shape + ((101, 101), (101, 101), (101, 101)) + >>> # sparse coordinate arrays + >>> xs, ys = np.meshgrid(x, y, sparse=True) + >>> zs = np.sqrt(xs**2 + ys**2) + >>> xs.shape, ys.shape, zs.shape + ((1, 101), (101, 1), (101, 101)) + >>> np.array_equal(zz, zs) + True + + >>> h = plt.contourf(x, y, zs) + >>> plt.axis('scaled') + >>> plt.colorbar() + >>> plt.show() + """ + ndim = len(xi) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) + for i, x in enumerate(xi)] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + s0[2:] + output[1].shape = (-1, 1) + s0[2:] + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = np.broadcast_arrays(*output, subok=True) + + if copy: + output = tuple(x.copy() for x in output) + + return output + + +def _delete_dispatcher(arr, obj, axis=None): + return (arr, obj) + + +@array_function_dispatch(_delete_dispatcher) +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int or array of ints + Indicate indices of sub-arrays to remove along the specified axis. + + .. versionchanged:: 1.19.0 + Boolean indices are now treated as a mask of elements to remove, + rather than being cast to the integers 0 and 1. + + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> arr = np.arange(12) + 1 + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further + use of `mask`. 
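+    The equivalence is easy to check:
+
+    >>> np.array_equal(result, np.delete(arr, [0, 2, 4], axis=0))
+    True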
+ + Examples + -------- + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + conv = _array_converter(arr) + arr, = conv.as_arrays(subok=False) + + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + return conv.wrap(arr.copy(order=arrorder), to_scalar=False) + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arrorder) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[tuple(slobj)] = arr[tuple(slobj)] + # copy end chunk + if stop == N: + pass + else: + slobj[axis] = slice(stop-numtodel, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(stop, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. + keep = ones(stop-start, dtype=bool) + keep[:stop-start:step] = False + slobj[axis] = slice(start, stop-numtodel) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(start, stop) + arr = arr[tuple(slobj2)] + slobj2[axis] = keep + new[tuple(slobj)] = arr[tuple(slobj2)] + + return conv.wrap(new, to_scalar=False) + + if isinstance(obj, (int, integer)) and not isinstance(obj, bool): + single_value = True + else: + single_value = False + _obj = obj + obj = np.asarray(obj) + # `size == 0` to allow empty lists similar to indexing, but (as there) + # is really too generic: + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + elif obj.size == 1 and obj.dtype.kind in "ui": + # For a size 1 integer array we can use the single-value path + # (most dtypes, except boolean, should just fail later). 
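+        # For example, obj = np.array([2]) is reduced to the scalar 2 here
+        # and follows the same fast path as obj = 2.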
+ obj = obj.item() + single_value = True + + if single_value: + # optimization for a single value + if (obj < -N or obj >= N): + raise IndexError( + "index %i is out of bounds for axis %i with " + "size %i" % (obj, axis, N)) + if (obj < 0): + obj += N + newshape[axis] -= 1 + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, obj) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(obj, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(obj+1, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + else: + if obj.dtype == bool: + if obj.shape != (N,): + raise ValueError('boolean array argument obj to delete ' + 'must be one dimensional and match the axis ' + 'length of {}'.format(N)) + + # optimization, the other branch is slower + keep = ~obj + else: + keep = ones(N, dtype=bool) + keep[obj,] = False + + slobj[axis] = keep + new = arr[tuple(slobj)] + + return conv.wrap(new, to_scalar=False) + + +def _insert_dispatcher(arr, obj, values, axis=None): + return (arr, obj, values) + + +@array_function_dispatch(_insert_dispatcher) +def insert(arr, obj, values, axis=None): + """ + Insert values along the given axis before the given indices. + + Parameters + ---------- + arr : array_like + Input array. + obj : int, slice or sequence of ints + Object that defines the index or indices before which `values` is + inserted. + + .. versionadded:: 1.8.0 + + Support for multiple insertions when `obj` is a single scalar or a + sequence with one element (similar to calling insert multiple + times). + values : array_like + Values to insert into `arr`. If the type of `values` is different + from that of `arr`, `values` is converted to the type of `arr`. + `values` should be shaped so that ``arr[...,obj,...] = values`` + is legal. + axis : int, optional + Axis along which to insert `values`. If `axis` is None then `arr` + is flattened first. + + Returns + ------- + out : ndarray + A copy of `arr` with `values` inserted. Note that `insert` + does not occur in-place: a new array is returned. If + `axis` is None, `out` is a flattened array. + + See Also + -------- + append : Append elements at the end of an array. + concatenate : Join a sequence of arrays along an existing axis. + delete : Delete elements from an array. + + Notes + ----- + Note that for higher dimensional inserts ``obj=0`` behaves very different + from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from + ``arr[:,[0],:] = values``. + + Examples + -------- + >>> a = np.array([[1, 1], [2, 2], [3, 3]]) + >>> a + array([[1, 1], + [2, 2], + [3, 3]]) + >>> np.insert(a, 1, 5) + array([1, 5, 1, ..., 2, 3, 3]) + >>> np.insert(a, 1, 5, axis=1) + array([[1, 5, 1], + [2, 5, 2], + [3, 5, 3]]) + + Difference between sequence and scalars: + + >>> np.insert(a, [1], [[1],[2],[3]], axis=1) + array([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), + ... 
np.insert(a, [1], [[1],[2],[3]], axis=1)) + True + + >>> b = a.flatten() + >>> b + array([1, 1, 2, 2, 3, 3]) + >>> np.insert(b, [2, 2], [5, 6]) + array([1, 1, 5, ..., 2, 3, 3]) + + >>> np.insert(b, slice(2, 4), [5, 6]) + array([1, 1, 5, ..., 2, 3, 3]) + + >>> np.insert(b, [2, 2], [7.13, False]) # type casting + array([1, 1, 7, ..., 2, 3, 3]) + + >>> x = np.arange(8).reshape(2, 4) + >>> idx = (1, 3) + >>> np.insert(x, idx, 999, axis=1) + array([[ 0, 999, 1, 2, 999, 3], + [ 4, 999, 5, 6, 999, 7]]) + + """ + conv = _array_converter(arr) + arr, = conv.as_arrays(subok=False) + + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + # turn it into a range object + indices = arange(*obj.indices(N), dtype=intp) + else: + # need to copy obj, because indices will be changed in-place + indices = np.array(obj) + if indices.dtype == bool: + # See also delete + # 2012-10-11, NumPy 1.8 + warnings.warn( + "in the future insert will treat boolean arrays and " + "array-likes as a boolean index instead of casting it to " + "integer", FutureWarning, stacklevel=2) + indices = indices.astype(intp) + # Code after warning period: + #if obj.ndim != 1: + # raise ValueError('boolean array argument obj to insert ' + # 'must be one dimensional') + #indices = np.flatnonzero(obj) + elif indices.ndim > 1: + raise ValueError( + "index array argument obj to insert must be one dimensional " + "or scalar") + if indices.size == 1: + index = indices.item() + if index < -N or index > N: + raise IndexError(f"index {obj} is out of bounds for axis {axis} " + f"with size {N}") + if (index < 0): + index += N + + # There are some object array corner cases here, but we cannot avoid + # that: + values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype) + if indices.ndim == 0: + # broadcasting is very different here, since a[:,0,:] = ... behaves + # very different from a[:,[0],:] = ...! This changes values so that + # it works likes the second case. 
(here a[:,0:1,:]) + values = np.moveaxis(values, 0, axis) + numnew = values.shape[axis] + newshape[axis] += numnew + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, index) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(index, index+numnew) + new[tuple(slobj)] = values + slobj[axis] = slice(index+numnew, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(index, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + + return conv.wrap(new, to_scalar=False) + + elif indices.size == 0 and not isinstance(obj, np.ndarray): + # Can safely cast the empty list to intp + indices = indices.astype(intp) + + indices[indices < 0] += N + + numnew = len(indices) + order = indices.argsort(kind='mergesort') # stable sort + indices[order] += np.arange(numnew) + + newshape[axis] += numnew + old_mask = ones(newshape[axis], dtype=bool) + old_mask[indices] = False + + new = empty(newshape, arr.dtype, arrorder) + slobj2 = [slice(None)]*ndim + slobj[axis] = indices + slobj2[axis] = old_mask + new[tuple(slobj)] = values + new[tuple(slobj2)] = arr + + return conv.wrap(new, to_scalar=False) + + +def _append_dispatcher(arr, values, axis=None): + return (arr, values) + + +@array_function_dispatch(_append_dispatcher) +def append(arr, values, axis=None): + """ + Append values to the end of an array. + + Parameters + ---------- + arr : array_like + Values are appended to a copy of this array. + values : array_like + These values are appended to a copy of `arr`. It must be of the + correct shape (the same shape as `arr`, excluding `axis`). If + `axis` is not specified, `values` can be any shape and will be + flattened before use. + axis : int, optional + The axis along which `values` are appended. If `axis` is not + given, both `arr` and `values` are flattened before use. + + Returns + ------- + append : ndarray + A copy of `arr` with `values` appended to `axis`. Note that + `append` does not occur in-place: a new array is allocated and + filled. If `axis` is None, `out` is a flattened array. + + See Also + -------- + insert : Insert elements into an array. + delete : Delete elements from an array. + + Examples + -------- + >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) + array([1, 2, 3, ..., 7, 8, 9]) + + When `axis` is specified, `values` must have the correct shape. + + >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) + Traceback (most recent call last): + ... + ValueError: all the input arrays must have same number of dimensions, but + the array at index 0 has 2 dimension(s) and the array at index 1 has 1 + dimension(s) + + """ + arr = asanyarray(arr) + if axis is None: + if arr.ndim != 1: + arr = arr.ravel() + values = ravel(values) + axis = arr.ndim-1 + return concatenate((arr, values), axis=axis) + + +def _digitize_dispatcher(x, bins, right=None): + return (x, bins) + + +@array_function_dispatch(_digitize_dispatcher) +def digitize(x, bins, right=False): + """ + Return the indices of the bins to which each value in input array belongs. 
+ + ========= ============= ============================ + `right` order of bins returned index `i` satisfies + ========= ============= ============================ + ``False`` increasing ``bins[i-1] <= x < bins[i]`` + ``True`` increasing ``bins[i-1] < x <= bins[i]`` + ``False`` decreasing ``bins[i-1] > x >= bins[i]`` + ``True`` decreasing ``bins[i-1] >= x > bins[i]`` + ========= ============= ============================ + + If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is + returned as appropriate. + + Parameters + ---------- + x : array_like + Input array to be binned. Prior to NumPy 1.10.0, this array had to + be 1-dimensional, but can now have any shape. + bins : array_like + Array of bins. It has to be 1-dimensional and monotonic. + right : bool, optional + Indicating whether the intervals include the right or the left bin + edge. Default behavior is (right==False) indicating that the interval + does not include the right edge. The left bin end is open in this + case, i.e., bins[i-1] <= x < bins[i] is the default behavior for + monotonically increasing bins. + + Returns + ------- + indices : ndarray of ints + Output array of indices, of same shape as `x`. + + Raises + ------ + ValueError + If `bins` is not monotonic. + TypeError + If the type of the input is complex. + + See Also + -------- + bincount, histogram, unique, searchsorted + + Notes + ----- + If values in `x` are such that they fall outside the bin range, + attempting to index `bins` with the indices that `digitize` returns + will result in an IndexError. + + .. versionadded:: 1.10.0 + + `numpy.digitize` is implemented in terms of `numpy.searchsorted`. + This means that a binary search is used to bin the values, which scales + much better for larger number of bins than the previous linear search. + It also removes the requirement for the input array to be 1-dimensional. + + For monotonically *increasing* `bins`, the following are equivalent:: + + np.digitize(x, bins, right=True) + np.searchsorted(bins, x, side='left') + + Note that as the order of the arguments are reversed, the side must be too. + The `searchsorted` call is marginally faster, as it does not do any + monotonicity checks. Perhaps more importantly, it supports all dtypes. + + Examples + -------- + >>> x = np.array([0.2, 6.4, 3.0, 1.6]) + >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) + >>> inds = np.digitize(x, bins) + >>> inds + array([1, 4, 3, 2]) + >>> for n in range(x.size): + ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) + ... 
+ 0.0 <= 0.2 < 1.0 + 4.0 <= 6.4 < 10.0 + 2.5 <= 3.0 < 4.0 + 1.0 <= 1.6 < 2.5 + + >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) + >>> bins = np.array([0, 5, 10, 15, 20]) + >>> np.digitize(x,bins,right=True) + array([1, 2, 3, 4, 4]) + >>> np.digitize(x,bins,right=False) + array([1, 3, 3, 4, 5]) + """ + x = _nx.asarray(x) + bins = _nx.asarray(bins) + + # here for compatibility, searchsorted below is happy to take this + if np.issubdtype(x.dtype, _nx.complexfloating): + raise TypeError("x may not be complex") + + mono = _monotonicity(bins) + if mono == 0: + raise ValueError("bins must be monotonically increasing or decreasing") + + # this is backwards because the arguments below are swapped + side = 'left' if right else 'right' + if mono == -1: + # reverse the bins, and invert the results + return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) + else: + return _nx.searchsorted(bins, x, side=side) diff --git a/phivenv/Lib/site-packages/numpy/lib/_function_base_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_function_base_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..98f7789a7efd432c48f247b077271b7a8101983c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_function_base_impl.pyi @@ -0,0 +1,700 @@ +import sys +from collections.abc import Sequence, Iterator, Callable, Iterable +from typing import ( + Literal as L, + Any, + TypeVar, + overload, + Protocol, + SupportsIndex, + SupportsInt, +) + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from numpy import ( + vectorize as vectorize, + ufunc, + generic, + floating, + complexfloating, + intp, + float64, + complex128, + timedelta64, + datetime64, + object_, + _OrderKACF, +) + +from numpy._typing import ( + NDArray, + ArrayLike, + DTypeLike, + _ShapeLike, + _ScalarLike_co, + _DTypeLike, + _ArrayLike, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _FloatLike_co, + _ComplexLike_co, +) + +from numpy._core.multiarray import ( + bincount as bincount, +) + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +_2Tuple = tuple[_T, _T] + +class _TrimZerosSequence(Protocol[_T_co]): + def __len__(self) -> int: ... + def __getitem__(self, key: slice, /) -> _T_co: ... + def __iter__(self) -> Iterator[Any]: ... + +class _SupportsWriteFlush(Protocol): + def write(self, s: str, /) -> object: ... + def flush(self) -> object: ... + +__all__: list[str] + +@overload +def rot90( + m: _ArrayLike[_SCT], + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[_SCT]: ... +@overload +def rot90( + m: ArrayLike, + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[Any]: ... + +@overload +def flip(m: _SCT, axis: None = ...) -> _SCT: ... +@overload +def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +@overload +def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ... + +def iterable(y: object) -> TypeGuard[Iterable[Any]]: ... + +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = ..., + weights: None | _ArrayLikeFloat_co= ..., + returned: L[False] = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... 
+@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = ..., + weights: None | _ArrayLikeComplex_co = ..., + returned: L[False] = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def average( + a: _ArrayLikeObject_co, + axis: None = ..., + weights: None | Any = ..., + returned: L[False] = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = ..., + weights: None | _ArrayLikeFloat_co= ..., + returned: L[True] = ..., + keepdims: L[False] = ..., +) -> _2Tuple[floating[Any]]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = ..., + weights: None | _ArrayLikeComplex_co = ..., + returned: L[True] = ..., + keepdims: L[False] = ..., +) -> _2Tuple[complexfloating[Any, Any]]: ... +@overload +def average( + a: _ArrayLikeObject_co, + axis: None = ..., + weights: None | Any = ..., + returned: L[True] = ..., + keepdims: L[False] = ..., +) -> _2Tuple[Any]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + weights: None | Any = ..., + returned: L[False] = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + weights: None | Any = ..., + returned: L[True] = ..., + keepdims: bool = ..., +) -> _2Tuple[Any]: ... + +@overload +def asarray_chkfinite( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray_chkfinite( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[Any]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., +) -> NDArray[Any]: ... + +# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate` +# xref python/mypy#8645 +@overload +def piecewise( + x: _ArrayLike[_SCT], + condlist: ArrayLike, + funclist: Sequence[Any | Callable[..., Any]], + *args: Any, + **kw: Any, +) -> NDArray[_SCT]: ... +@overload +def piecewise( + x: ArrayLike, + condlist: ArrayLike, + funclist: Sequence[Any | Callable[..., Any]], + *args: Any, + **kw: Any, +) -> NDArray[Any]: ... + +def select( + condlist: Sequence[ArrayLike], + choicelist: Sequence[ArrayLike], + default: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def copy( + a: _ArrayType, + order: _OrderKACF, + subok: L[True], +) -> _ArrayType: ... +@overload +def copy( + a: _ArrayType, + order: _OrderKACF = ..., + *, + subok: L[True], +) -> _ArrayType: ... +@overload +def copy( + a: _ArrayLike[_SCT], + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[_SCT]: ... +@overload +def copy( + a: ArrayLike, + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[Any]: ... + +def gradient( + f: ArrayLike, + *varargs: ArrayLike, + axis: None | _ShapeLike = ..., + edge_order: L[1, 2] = ..., +) -> Any: ... + +@overload +def diff( + a: _T, + n: L[0], + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> _T: ... +@overload +def diff( + a: ArrayLike, + n: int = ..., + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> NDArray[Any]: ... 
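+# Note: the `n: L[0]` overload above encodes that `np.diff` returns its
+# input unchanged when `n == 0`.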
+ +@overload +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: None | _FloatLike_co = ..., + right: None | _FloatLike_co = ..., + period: None | _FloatLike_co = ..., +) -> NDArray[float64]: ... +@overload +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeComplex_co, + left: None | _ComplexLike_co = ..., + right: None | _ComplexLike_co = ..., + period: None | _FloatLike_co = ..., +) -> NDArray[complex128]: ... + +@overload +def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ... +@overload +def angle(z: object_, deg: bool = ...) -> Any: ... +@overload +def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ... +@overload +def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... + +@overload +def unwrap( + p: _ArrayLikeFloat_co, + discont: None | float = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[floating[Any]]: ... +@overload +def unwrap( + p: _ArrayLikeObject_co, + discont: None | float = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[object_]: ... + +def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ... + +def trim_zeros( + filt: _TrimZerosSequence[_T], + trim: L["f", "b", "fb", "bf"] = ..., +) -> _T: ... + +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... + +def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... + +def disp( + mesg: object, + device: None | _SupportsWriteFlush = ..., + linefeed: bool = ..., +) -> None: ... + +@overload +def cov( + m: _ArrayLikeFloat_co, + y: None | _ArrayLikeFloat_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: _DTypeLike[_SCT], +) -> NDArray[_SCT]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +# NOTE `bias` and `ddof` have been deprecated +@overload +def corrcoef( + m: _ArrayLikeFloat_co, + y: None | _ArrayLikeFloat_co = ..., + rowvar: bool = ..., + *, + dtype: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: _DTypeLike[_SCT], +) -> NDArray[_SCT]: ... 
+@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... + +def kaiser( + M: _FloatLike_co, + beta: _FloatLike_co, +) -> NDArray[floating[Any]]: ... + +@overload +def sinc(x: _FloatLike_co) -> floating[Any]: ... +@overload +def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def median( + a: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... +@overload +def median( + a: _ArrayLikeComplex_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def median( + a: _ArrayLikeTD64_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def median( + a: _ArrayLikeObject_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayType: ... + +_MethodKind = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] + +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> floating[Any]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> complexfloating[Any, Any]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> timedelta64: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> datetime64: ... 
+@overload +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> NDArray[floating[Any]]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> NDArray[timedelta64]: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> NDArray[datetime64]: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> NDArray[object_]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None | _ShapeLike = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: None | _ArrayLikeFloat_co = ..., +) -> _ArrayType: ... + +# NOTE: Not an alias, but they do have identical signatures +# (that we can reuse) +quantile = percentile + +def meshgrid( + *xi: ArrayLike, + copy: bool = ..., + sparse: bool = ..., + indexing: L["xy", "ij"] = ..., +) -> tuple[NDArray[Any], ...]: ... + +@overload +def delete( + arr: _ArrayLike[_SCT], + obj: slice | _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def delete( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +@overload +def insert( + arr: _ArrayLike[_SCT], + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: None | SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def insert( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +def append( + arr: ArrayLike, + values: ArrayLike, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +@overload +def digitize( + x: _FloatLike_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> intp: ... 
+@overload +def digitize( + x: _ArrayLikeFloat_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> NDArray[intp]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_histograms_impl.py b/phivenv/Lib/site-packages/numpy/lib/_histograms_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..3169e2047d47469a4d13e55fdc2608efe4e11c15 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_histograms_impl.py @@ -0,0 +1,1082 @@ +""" +Histogram-related functions +""" +import contextlib +import functools +import operator +import warnings + +import numpy as np +from numpy._core import overrides + +__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + +# range is a keyword argument to many functions, so save the builtin so they can +# use it. +_range = range + + +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + +def _hist_bin_sqrt(x, range): + """ + Square root histogram bin estimator. + + Bin width is inversely proportional to the data size. Used by many + programs for its simplicity. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / np.sqrt(x.size) + + +def _hist_bin_sturges(x, range): + """ + Sturges histogram bin estimator. + + A very simplistic estimator based on the assumption of normality of + the data. This estimator has poor performance for non-normal data, + which becomes especially obvious for large data sets. The estimate + depends only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (np.log2(x.size) + 1.0) + + +def _hist_bin_rice(x, range): + """ + Rice histogram bin estimator. + + Another simple estimator with no normality assumption. It has better + performance for large data than Sturges, but tends to overestimate + the number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). The estimate depends + only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) + + +def _hist_bin_scott(x, range): + """ + Scott histogram bin estimator. + + The binwidth is proportional to the standard deviation of the data + and inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. 
+ """ + del range # unused + return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) + + +def _hist_bin_stone(x, range): + """ + Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). + + The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. + The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. + https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule + + This paper by Stone appears to be the origination of this rule. + https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : (float, float) + The lower and upper range of the bins. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + + n = x.size + ptp_x = _ptp(x) + if n <= 1 or ptp_x == 0: + return 0 + + def jhat(nbins): + hh = ptp_x / nbins + p_k = np.histogram(x, bins=nbins, range=range)[0] / n + return (2 - (n + 1) * p_k.dot(p_k)) / hh + + nbins_upper_bound = max(100, int(np.sqrt(n))) + nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) + if nbins == nbins_upper_bound: + warnings.warn("The number of bins estimated may be suboptimal.", + RuntimeWarning, stacklevel=3) + return ptp_x / nbins + + +def _hist_bin_doane(x, range): + """ + Doane's histogram bin estimator. + + Improved version of Sturges' formula which works better for + non-normal data. See + stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + if x.size > 2: + sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) + sigma = np.std(x) + if sigma > 0.0: + # These three operations add up to + # g1 = np.mean(((x - np.mean(x)) / sigma)**3) + # but use only one temp array instead of three + temp = x - np.mean(x) + np.true_divide(temp, sigma, temp) + np.power(temp, 3, temp) + g1 = np.mean(temp) + return _ptp(x) / (1.0 + np.log2(x.size) + + np.log2(1.0 + np.absolute(g1) / sg1)) + return 0.0 + + +def _hist_bin_fd(x, range): + """ + The Freedman-Diaconis histogram bin estimator. + + The Freedman-Diaconis rule uses interquartile range (IQR) to + estimate binwidth. It is considered a variation of the Scott rule + with more robustness as the IQR is less affected by outliers than + the standard deviation. However, the IQR depends on fewer points + than the standard deviation, so it is less accurate, especially for + long tailed distributions. + + If the IQR is 0, this function returns 0 for the bin width. + Binwidth is inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + iqr = np.subtract(*np.percentile(x, [75, 25])) + return 2.0 * iqr * x.size ** (-1.0 / 3.0) + + +def _hist_bin_auto(x, range): + """ + Histogram bin estimator that uses the minimum width of the + Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero. + If the bin width from the FD estimator is 0, the Sturges estimator is used. 
+ + The FD estimator is usually the most robust method, but its width + estimate tends to be too large for small `x` and bad for data with limited + variance. The Sturges estimator is quite good for small (<1000) datasets + and is the default in the R language. This method gives good off-the-shelf + behaviour. + + .. versionchanged:: 1.15.0 + If there is limited variance the IQR can be 0, which results in the + FD bin width being 0 too. This is not a valid bin width, so + ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. + If the IQR is 0, it's unlikely any variance-based estimators will be of + use, so we revert to the Sturges estimator, which only uses the size of the + dataset in its calculation. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + + See Also + -------- + _hist_bin_fd, _hist_bin_sturges + """ + fd_bw = _hist_bin_fd(x, range) + sturges_bw = _hist_bin_sturges(x, range) + del range # unused + if fd_bw: + return min(fd_bw, sturges_bw) + else: + # limited variance, so we return a len dependent bw estimator + return sturges_bw + +# Private dict initialized at module load time +_hist_bin_selectors = {'stone': _hist_bin_stone, + 'auto': _hist_bin_auto, + 'doane': _hist_bin_doane, + 'fd': _hist_bin_fd, + 'rice': _hist_bin_rice, + 'scott': _hist_bin_scott, + 'sqrt': _hist_bin_sqrt, + 'sturges': _hist_bin_sturges} + + +def _ravel_and_check_weights(a, weights): + """ Check a and weights have matching shapes, and ravel both """ + a = np.asarray(a) + + # Ensure that the array is a "subtractable" dtype + if a.dtype == np.bool: + warnings.warn("Converting input from {} to {} for compatibility." + .format(a.dtype, np.uint8), + RuntimeWarning, stacklevel=3) + a = a.astype(np.uint8) + + if weights is not None: + weights = np.asarray(weights) + if weights.shape != a.shape: + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + return a, weights + + +def _get_outer_edges(a, range): + """ + Determine the outer bin edges to use, from either the data or the range + argument + """ + if range is not None: + first_edge, last_edge = range + if first_edge > last_edge: + raise ValueError( + 'max must be larger than min in range parameter.') + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) + elif a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. 
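+        # (an empty input therefore still yields finite, non-degenerate
+        # bin edges spanning [0, 1])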
+ first_edge, last_edge = 0, 1 + else: + first_edge, last_edge = a.min(), a.max() + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) + + # expand empty range to avoid divide by zero + if first_edge == last_edge: + first_edge = first_edge - 0.5 + last_edge = last_edge + 0.5 + + return first_edge, last_edge + + +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + unsigned_dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned. The input may be negative python integers so + # ensure we pass in arrays with the initial dtype (related to NEP 50). + return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt), + casting='unsafe', dtype=unsigned_dt) + + +def _get_bin_edges(a, bins, range, weights): + """ + Computes the bins used internally by `histogram`. + + Parameters + ========== + a : ndarray + Ravelled data array + bins, range + Forwarded arguments from `histogram`. + weights : ndarray, optional + Ravelled weights array, or None + + Returns + ======= + bin_edges : ndarray + Array of bin edges + uniform_bins : (Number, Number, int): + The upper bound, lowerbound, and number of bins, used in the optimized + implementation of `histogram` that works on uniform bins. + """ + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None + + if isinstance(bins, str): + bin_name = bins + # if `bins` is a string for an automatic method, + # this will replace it with the number of bins calculated + if bin_name not in _hist_bin_selectors: + raise ValueError( + "{!r} is not a valid estimator for `bins`".format(bin_name)) + if weights is not None: + raise TypeError("Automated estimation of the number of " + "bins is not supported for weighted data") + + first_edge, last_edge = _get_outer_edges(a, range) + + # truncate the range if needed + if range is not None: + keep = (a >= first_edge) + keep &= (a <= last_edge) + if not np.logical_and.reduce(keep): + a = a[keep] + + if a.size == 0: + n_equal_bins = 1 + else: + # Do not call selectors on empty arrays + width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) + if width: + n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) + else: + # Width can be zero for some estimators, e.g. FD when + # the IQR of the data is zero. 
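+                    # For instance, a = np.array([1., 1., 1., 1., 1., 9.])
+                    # has zero IQR, so the 'fd' width is zero and a single
+                    # bin is used instead.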
+ n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError as e: + raise TypeError( + '`bins` must be an integer, a string, or an array') from e + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + first_edge, last_edge = _get_outer_edges(a, range) + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + if n_equal_bins is not None: + # gh-10322 means that type resolution rules are dependent on array + # shapes. To avoid this causing problems, we pick a type now and stick + # with it throughout. + bin_type = np.result_type(first_edge, last_edge, a) + if np.issubdtype(bin_type, np.integer): + bin_type = np.result_type(bin_type, float) + + # bin edges must be computed + bin_edges = np.linspace( + first_edge, last_edge, n_equal_bins + 1, + endpoint=True, dtype=bin_type) + return bin_edges, (first_edge, last_edge, n_equal_bins) + else: + return bin_edges, None + + +def _search_sorted_inclusive(a, v): + """ + Like `searchsorted`, but where the last item in `v` is placed on the right. + + In the context of a histogram, this makes the last bin edge inclusive + """ + return np.concatenate(( + a.searchsorted(v[:-1], 'left'), + a.searchsorted(v[-1:], 'right') + )) + + +def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_bin_edges_dispatcher) +def histogram_bin_edges(a, bins=10, range=None, weights=None): + r""" + Function to calculate only the edges of the bins used by the `histogram` + function. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines the bin edges, including the rightmost + edge, allowing for non-uniform bin widths. + + If `bins` is a string from the list below, `histogram_bin_edges` will + use the method chosen to calculate the optimal bin width and + consequently the number of bins (see the Notes section for more detail + on the estimators) from the data that falls within the requested range. + While the bin width will be optimal for the actual data + in the range, the number of bins will be computed to fill the + entire range, including the empty portions. For visualisation, + using the 'auto' option is suggested. Weighted data is not + supported for automated bin size selection. + + 'auto' + Minimum bin width between the 'sturges' and 'fd' estimators. + Provides good all-around performance. + + 'fd' (Freedman Diaconis Estimator) + Robust (resilient to outliers) estimator that takes into + account data variability and data size. + + 'doane' + An improved version of Sturges' estimator that works better + with non-normal datasets. + + 'scott' + Less robust estimator that takes into account data variability + and data size. + + 'stone' + Estimator based on leave-one-out cross-validation estimate of + the integrated squared error. Can be regarded as a generalization + of Scott's rule. + + 'rice' + Estimator does not take variability into account, only data + size. Commonly overestimates number of bins required. 
+ + 'sturges' + R's default method, only accounts for data size. Only + optimal for gaussian data and underestimates number of bins + for large non-gaussian datasets. + + 'sqrt' + Square root (of data size) estimator, used by Excel and + other programs for its speed and simplicity. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). This is currently not used by any of the bin estimators, + but may be in the future. + + Returns + ------- + bin_edges : array of dtype float + The edges to pass into `histogram` + + See Also + -------- + histogram + + Notes + ----- + The methods to estimate the optimal number of bins are well founded + in literature, and are inspired by the choices R provides for + histogram visualisation. Note that having the number of bins + proportional to :math:`n^{1/3}` is asymptotically optimal, which is + why it appears in most estimators. These are simply plug-in methods + that give good starting points for number of bins. In the equations + below, :math:`h` is the binwidth and :math:`n_h` is the number of + bins. All estimators that compute bin counts are recast to bin width + using the `ptp` of the data. The final bin count is obtained from + ``np.round(np.ceil(range / h))``. The final bin width is often less + than what is returned by the estimators below. + + 'auto' (minimum bin width of the 'sturges' and 'fd' estimators) + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. + Switchover point is usually :math:`a.size \approx 1000`. + + 'fd' (Freedman Diaconis Estimator) + .. math:: h = 2 \frac{IQR}{n^{1/3}} + + The binwidth is proportional to the interquartile range (IQR) + and inversely proportional to cube root of a.size. Can be too + conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. + + 'scott' + .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}} + + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. + + 'rice' + .. math:: n_h = 2n^{1/3} + + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. + + 'sturges' + .. math:: n_h = \log _{2}(n) + 1 + + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'doane' + .. 
math:: n_h = 1 + \log_{2}(n) + + \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right) + + g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] + + \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. This estimator attempts to + account for the skew of the data. + + 'sqrt' + .. math:: n_h = \sqrt n + + The simplest and fastest estimator. Only takes into account the + data size. + + Examples + -------- + >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) + >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) + array([0. , 0.25, 0.5 , 0.75, 1. ]) + >>> np.histogram_bin_edges(arr, bins=2) + array([0. , 2.5, 5. ]) + + For consistency with histogram, an array of pre-computed bins is + passed through unmodified: + + >>> np.histogram_bin_edges(arr, [1, 2]) + array([1, 2]) + + This function allows one set of bins to be computed, and reused across + multiple histograms: + + >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') + >>> shared_bins + array([0., 1., 2., 3., 4., 5.]) + + >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) + >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) + >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) + + >>> hist_0; hist_1 + array([1, 1, 0, 1, 0]) + array([2, 0, 1, 1, 2]) + + Which gives more easily comparable results than using separate bins for + each histogram: + + >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') + >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') + >>> hist_0; hist_1 + array([1, 1, 1]) + array([2, 1, 1, 2]) + >>> bins_0; bins_1 + array([0., 1., 2., 3.]) + array([0. , 1.25, 2.5 , 3.75, 5. ]) + + """ + a, weights = _ravel_and_check_weights(a, weights) + bin_edges, _ = _get_bin_edges(a, bins, range, weights) + return bin_edges + + +def _histogram_dispatcher( + a, bins=None, range=None, density=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_dispatcher) +def histogram(a, bins=10, range=None, density=None, weights=None): + r""" + Compute the histogram of a dataset. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines a monotonically increasing array of bin edges, + including the rightmost edge, allowing for non-uniform bin widths. + + .. versionadded:: 1.11.0 + + If `bins` is a string, it defines the method used to calculate the + optimal bin width, as defined by `histogram_bin_edges`. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. 
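+        For example, ``np.histogram([1, 1], weights=[0.25, 0.75], bins=1)``
+        counts a total weight of ``1.0`` in its single bin.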
+ Please note that the ``dtype`` of `weights` will also become the + ``dtype`` of the returned accumulator (`hist`), so it must be + large enough to hold accumulated values as well. + density : bool, optional + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + + Returns + ------- + hist : array + The values of the histogram. See `density` and `weights` for a + description of the possible semantics. If `weights` are given, + ``hist.dtype`` will be taken from `weights`. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize, histogram_bin_edges + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. + + + Examples + -------- + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + >>> hist.sum() + 2.4999999999999996 + >>> np.sum(hist * np.diff(bin_edges)) + 1.0 + + .. versionadded:: 1.11.0 + + Automated Bin Selection Methods example, using 2 peak random data + with 2000 points. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + import numpy as np + + rng = np.random.RandomState(10) # deterministic random data + a = np.hstack((rng.normal(size=1000), + rng.normal(loc=5, scale=2, size=1000))) + plt.hist(a, bins='auto') # arguments are passed to np.histogram + plt.title("Histogram with 'auto' bins") + plt.show() + + """ + a, weights = _ravel_and_check_weights(a, weights) + + bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) + + # Histogram is an integer or a float array depending on the weights. + if weights is None: + ntype = np.dtype(np.intp) + else: + ntype = weights.dtype + + # We set a block size, as this allows us to iterate over chunks when + # computing histograms, to minimize memory usage. + BLOCK = 65536 + + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) + + if uniform_bins is not None and simple_weights: + # Fast algorithm for equal bins + # We now convert values of a to bin indices, under the assumption of + # equal bin widths (which is valid here). 
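+        # Illustration (added comment, not upstream code): with
+        # first_edge=0, last_edge=10 and n_equal_bins=5, a value 7.3 maps
+        # to floor(7.3 / 10 * 5) = bin 3; the exact computation follows.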
+ first_edge, last_edge, n_equal_bins = uniform_bins + + # Initialize empty histogram + n = np.zeros(n_equal_bins, ntype) + + # Pre-compute histogram scaling factor + norm_numerator = n_equal_bins + norm_denom = _unsigned_subtract(last_edge, first_edge) + + # We iterate over blocks here for two reasons: the first is that for + # large arrays, it is actually faster (for example for a 10^8 array it + # is 2x as fast) and it results in a memory footprint 3x lower in the + # limit of large arrays. + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i+BLOCK] + if weights is None: + tmp_w = None + else: + tmp_w = weights[i:i + BLOCK] + + # Only include values in the right range + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) + if not np.logical_and.reduce(keep): + tmp_a = tmp_a[keep] + if tmp_w is not None: + tmp_w = tmp_w[keep] + + # This cast ensures no type promotions occur below, which gh-10322 + # make unpredictable. Getting it wrong leads to precision errors + # like gh-8123. + tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) + + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one + f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom) + * norm_numerator) + indices = f_indices.astype(np.intp) + indices[indices == n_equal_bins] -= 1 + + # The index computation is not guaranteed to give exactly + # consistent results within ~1 ULP of the bin edges. + decrement = tmp_a < bin_edges[indices] + indices[decrement] -= 1 + # The last bin includes the right edge. The other bins do not. + increment = ((tmp_a >= bin_edges[indices + 1]) + & (indices != n_equal_bins - 1)) + indices[increment] += 1 + + # We now compute the histogram using bincount + if ntype.kind == 'c': + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=n_equal_bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=n_equal_bins) + else: + n += np.bincount(indices, weights=tmp_w, + minlength=n_equal_bins).astype(ntype) + else: + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) + if weights is None: + for i in _range(0, len(a), BLOCK): + sa = np.sort(a[i:i+BLOCK]) + cum_n += _search_sorted_inclusive(sa, bin_edges) + else: + zero = np.zeros(1, dtype=ntype) + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i+BLOCK] + tmp_w = weights[i:i+BLOCK] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate((zero, sw.cumsum())) + bin_index = _search_sorted_inclusive(sa, bin_edges) + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + if density: + db = np.array(np.diff(bin_edges), float) + return n/db/n.sum(), bin_edges + + return n, bin_edges + + +def _histogramdd_dispatcher(sample, bins=None, range=None, density=None, + weights=None): + if hasattr(sample, 'shape'): # same condition as used in histogramdd + yield sample + else: + yield from sample + with contextlib.suppress(TypeError): + yield from bins + yield weights + + +@array_function_dispatch(_histogramdd_dispatcher) +def histogramdd(sample, bins=10, range=None, density=None, weights=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : (N, D) array, or (N, D) array_like + The data to be histogrammed. + + Note the unusual interpretation of sample when an array_like: + + * When an array, each row is a coordinate in a D-dimensional space - + such as ``histogramdd(np.array([p1, p2, p3]))``. 
+ * When an array_like, each element is the list of values for single + coordinate - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of length D, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_volume``. + weights : (N,) array_like, optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if density is True. If density is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See density and weights + for the different possible semantics. + edges : tuple of ndarrays + A tuple of D arrays describing the bin edges for each dimension. + + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> r = np.random.randn(100,3) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + N, D = sample.shape + + nbin = np.empty(D, np.intp) + edges = D*[None] + dedges = D*[None] + if weights is not None: + weights = np.asarray(weights) + + try: + M = len(bins) + if M != D: + raise ValueError( + 'The dimension of bins must be equal to the dimension of the ' + 'sample x.') + except TypeError: + # bins is an integer + bins = D*[bins] + + # normalize the range argument + if range is None: + range = (None,) * D + elif len(range) != D: + raise ValueError('range argument must have one entry per dimension') + + # Create edge arrays + for i in _range(D): + if np.ndim(bins[i]) == 0: + if bins[i] < 1: + raise ValueError( + '`bins[{}]` must be positive, when an integer'.format(i)) + smin, smax = _get_outer_edges(sample[:,i], range[i]) + try: + n = operator.index(bins[i]) + + except TypeError as e: + raise TypeError( + "`bins[{}]` must be an integer, when a scalar".format(i) + ) from e + + edges[i] = np.linspace(smin, smax, n + 1) + elif np.ndim(bins[i]) == 1: + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > edges[i][1:]): + raise ValueError( + '`bins[{}]` must be monotonically increasing, when an array' + .format(i)) + else: + raise ValueError( + '`bins[{}]` must be a scalar or 1d array'.format(i)) + + nbin[i] = len(edges[i]) + 1 # includes an outlier on each end + dedges[i] = np.diff(edges[i]) + + # Compute the bin number each sample falls into. 
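+    # Illustration (added comment, not upstream code): with edges [0, 1, 2],
+    # np.searchsorted([0, 1, 2], [0.5, 1.0, 2.0], side='right') gives
+    # [1, 2, 3]: values on an inner edge fall in the bin to their right,
+    # and the rightmost-edge case is corrected just below.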
+ Ncount = tuple( + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') + for i in _range(D) + ) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in _range(D): + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 + + # Compute the sample indices in the flattened histogram matrix. + # This raises an error if the array is too large. + xy = np.ravel_multi_index(Ncount, nbin) + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + hist = np.bincount(xy, weights, minlength=nbin.prod()) + + # Shape into a proper matrix + hist = hist.reshape(nbin) + + # This preserves the (bad) behavior observed in gh-7845, for now. + hist = hist.astype(float, casting='safe') + + # Remove outliers (indices 0 and -1 for each dimension). + core = D*(slice(1, -1),) + hist = hist[core] + + if density: + # calculate the probability density function + s = hist.sum() + for i in _range(D): + shape = np.ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges diff --git a/phivenv/Lib/site-packages/numpy/lib/_histograms_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_histograms_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ff24839e02ff2d1ea16cdfc819091554f33b3569 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_histograms_impl.pyi @@ -0,0 +1,47 @@ +from collections.abc import Sequence +from typing import ( + Literal as L, + Any, + SupportsIndex, +) + +from numpy._typing import ( + NDArray, + ArrayLike, +) + +_BinKind = L[ + "stone", + "auto", + "doane", + "fd", + "rice", + "scott", + "sqrt", + "sturges", +] + +__all__: list[str] + +def histogram_bin_edges( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: None | tuple[float, float] = ..., + weights: None | ArrayLike = ..., +) -> NDArray[Any]: ... + +def histogram( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: None | tuple[float, float] = ..., + density: bool = ..., + weights: None | ArrayLike = ..., +) -> tuple[NDArray[Any], NDArray[Any]]: ... + +def histogramdd( + sample: ArrayLike, + bins: SupportsIndex | ArrayLike = ..., + range: Sequence[tuple[float, float]] = ..., + density: None | bool = ..., + weights: None | ArrayLike = ..., +) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... 
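Taken together, the three public functions stubbed above compose naturally; a minimal sketch on made-up data (variable names are illustrative, not from the diff):

    import numpy as np

    data = np.array([0.2, 0.8, 1.5, 2.7, 2.9])
    edges = np.histogram_bin_edges(data, bins=3)   # array([0.2, 1.1, 2. , 2.9])
    counts, _ = np.histogram(data, bins=edges)     # array([2, 1, 2]); 2.9 lands
                                                   # in the last (closed) bin
    H, _ = np.histogramdd(np.c_[data, data], bins=(edges, edges))

Reusing `edges` this way keeps the 1-D and 2-D counts directly comparable, which is the pattern the `histogram_bin_edges` docstring recommends.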
diff --git a/phivenv/Lib/site-packages/numpy/lib/_index_tricks_impl.py b/phivenv/Lib/site-packages/numpy/lib/_index_tricks_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..7745f103c17098426f67105694f7386f00aecfeb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_index_tricks_impl.py @@ -0,0 +1,1059 @@ +import functools +import sys +import math +import warnings + +import numpy as np +from .._utils import set_module +import numpy._core.numeric as _nx +from numpy._core.numeric import ScalarType, array +from numpy._core.numerictypes import issubdtype + +import numpy.matrixlib as matrixlib +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._core import overrides, linspace +from numpy.lib.stride_tricks import as_strided +from numpy.lib._function_base_impl import diff + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' +] + + +def _ix__dispatcher(*args): + return args + + +@array_function_dispatch(_ix__dispatcher) +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + Each sequence should be of integer or boolean type. + Boolean sequences will be interpreted as boolean masks for the + corresponding dimension (equivalent to passing in + ``np.nonzero(boolean_sequence)``). + + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + if not isinstance(new, _nx.ndarray): + new = np.asarray(new) + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if issubdtype(new.dtype, _nx.bool): + new, = new.nonzero() + new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) + out.append(new) + return tuple(out) + + +class nd_grid: + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. 
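+    For example, ``mgrid[0:5:2]`` yields ``array([0, 2, 4])``; the stop
+    value 5 is excluded.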
+ + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. + """ + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + # Mimic the behavior of `np.arange` and use a data type + # which is at least as large as `np.int_` + num_list = [0] + for k in range(len(key)): + step = key[k].step + start = key[k].start + stop = key[k].stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = abs(step) + size.append(int(step)) + else: + size.append( + int(math.ceil((stop - start) / (step*1.0)))) + num_list += [start, stop, step] + typ = _nx.result_type(*num_list) + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,)*len(size))] + else: + nn = _nx.indices(size, typ) + for k, kk in enumerate(key): + step = kk.step + start = kk.start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = int(abs(step)) + if step != 1: + step = (kk.stop - start) / float(step - 1) + nn[k] = (nn[k]*step+start) + if self.sparse: + slobj = [_nx.newaxis]*len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return tuple(nn) # ogrid -> tuple of arrays + return nn # mgrid -> ndarray + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, (_nx.complexfloating, complex)): + # Prevent the (potential) creation of integer arrays + step_float = abs(step) + step = length = int(step_float) + if step != 1: + step = (key.stop-start)/float(step-1) + typ = _nx.result_type(start, stop, step_float) + return _nx.arange(0, length, 1, dtype=typ)*step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + An instance which returns a dense multi-dimensional "meshgrid". + + An instance which returns a dense (or fleshed out) mesh-grid + when indexed, so that each returned argument has the same shape. + The dimensions and number of the output arrays are equal to the + number of indexing dimensions. If the step length is not a complex + number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray + A single array, containing a set of `ndarray`\\ s all of the same + dimensions. stacked along the first axis. 
+ + See Also + -------- + ogrid : like `mgrid` but returns open (not fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> np.mgrid[0:5, 0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + >>> np.mgrid[0:4].shape + (4,) + >>> np.mgrid[0:4, 0:5].shape + (2, 4, 5) + >>> np.mgrid[0:4, 0:5, 0:6].shape + (3, 4, 5, 6) + + """ + + def __init__(self): + super().__init__(sparse=False) + + +mgrid = MGridClass() + + +class OGridClass(nd_grid): + """ + An instance which returns an open multi-dimensional "meshgrid". + + An instance which returns an open (i.e. not fleshed out) mesh-grid + when indexed, so that only one dimension of each returned array is + greater than 1. The dimension and number of the output arrays are + equal to the number of indexing dimensions. If the step length is + not a complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray or tuple of ndarrays + If the input is a single slice, returns an array. + If the input is multiple slices, returns a tuple of arrays, with + only one dimension not equal to 1. + + See Also + -------- + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5, 0:5] + (array([[0], + [1], + [2], + [3], + [4]]), + array([[0, 1, 2, 3, 4]])) + + """ + + def __init__(self): + super().__init__(sparse=True) + + +ogrid = OGridClass() + + +class AxisConcatenator: + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. 
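+
+    For example, ``np.r_[1:4, 0]`` expands the slice with `arange` and
+    concatenates the pieces, giving ``array([1, 2, 3, 0])``.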
+ """ + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + # dtypes or scalars for weak scalar handling in result_type + result_type_objs = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=None, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception as e: + raise ValueError( + "unknown special directive {!r}".format(item) + ) from e + try: + axis = int(item) + continue + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e + elif type(item) in ScalarType: + scalar = True + newobj = item + else: + item_ndim = np.ndim(item) + newobj = array(item, copy=None, subok=True, ndmin=ndmin) + if trans1d != -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + + objs.append(newobj) + if scalar: + result_type_objs.append(item) + else: + result_type_objs.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted, for 0, drops + # through to error in concatenate. + if len(result_type_objs) != 0: + final_dtype = _nx.result_type(*result_type_objs) + # concatenate could do cast, but that can be overridden: + objs = [array(obj, copy=None, subok=True, + ndmin=ndmin, dtype=final_dtype) for obj in objs] + + res = self.concatenate(tuple(objs), axis=axis) + + if matrix: + oldndim = res.ndim + res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatentor(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. 
+ + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, ..., 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, 0) + + +r_ = RClass() + + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. 
In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, ..., 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate: + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = np.asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + +@set_module('numpy') +class ndindex: + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. + + Parameters + ---------- + shape : ints, or a single tuple of ints + The size of each dimension of the array can be passed as + individual parameters or as the elements of a tuple. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + Dimensions as individual arguments + + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + Same dimensions - but in a tuple ``(3, 2, 1)`` + + >>> for index in np.ndindex((3, 2, 1)): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + + .. deprecated:: 1.20.0 + This method has been advised against since numpy 1.8.0, but only + started emitting DeprecationWarning as of this version. + """ + # NumPy 1.20.0, 2020-09-08 + warnings.warn( + "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", + DeprecationWarning, stacklevel=2) + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. 
This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression: + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances ``index_exp`` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + The ``index_exp`` is another predefined instance that + always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + + Notes + ----- + You can do all this with :class:`slice` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. + + Examples + -------- + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + values ``a[i, ..., i]`` with indices ``i`` all identical. This function + modifies the input array in-place without returning a value. + + Parameters + ---------- + a : array, at least 2-D. + Array whose diagonal is to be filled in-place. + val : scalar or array_like + Value(s) to write on the diagonal. If `val` is scalar, the value is + written along the diagonal. If array-like, the flattened `val` is + written along the diagonal, repeating if necessary to fill all + diagonal entries. + + wrap : bool + For tall matrices in NumPy version up to 1.6.2, the + diagonal "wrapped" after N columns. You can have this behavior + with this option. This affects only tall matrices. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + This functionality can be obtained via `diag_indices`, but internally + this version uses a much faster implementation that never constructs the + indices and uses simple slicing. 
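+
+    For a 2-D array of shape ``(n, m)``, the diagonal occupies the flat
+    positions ``0, m + 1, 2*(m + 1), ...``, so a single strided assignment
+    ``a.flat[::m + 1] = val`` suffices; the slice end is capped when
+    ``wrap`` is False so tall matrices do not wrap.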
+ + Examples + -------- + >>> a = np.zeros((3, 3), int) + >>> np.fill_diagonal(a, 5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + The same function can operate on a 4-D array: + + >>> a = np.zeros((3, 3, 3, 3), int) + >>> np.fill_diagonal(a, 4) + + We only show a few blocks for clarity: + + >>> a[0, 0] + array([[4, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + >>> a[1, 1] + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 0]]) + >>> a[2, 2] + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 4]]) + + The wrap option affects only tall matrices: + + >>> # tall matrices no wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [0, 0, 0]]) + + >>> # tall matrices wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [4, 0, 0]]) + + >>> # wide matrices + >>> a = np.zeros((3, 5), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 0, 4, 0, 0]]) + + The anti-diagonal can be filled by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.zeros((3, 3), int); + >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip + >>> a + array([[0, 0, 1], + [0, 2, 0], + [3, 0, 0]]) + >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip + >>> a + array([[0, 0, 3], + [0, 2, 0], + [1, 0, 0]]) + + Note that the order in which the diagonal is filled varies depending + on the flip function. + """ + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + end = None + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + # This is needed to don't have tall matrix have the diagonal wrap. + if not wrap: + end = a.shape[1] * a.shape[1] + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(a.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (np.cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[:end:step] = val + + +@set_module('numpy') +def diag_indices(n, ndim=2): + """ + Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape + (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for + ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` + for ``i = [0..n-1]``. + + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See Also + -------- + diag_indices_from + + Notes + ----- + .. 
versionadded:: 1.4.0 + + Examples + -------- + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = np.arange(n) + return (idx,) * ndim + + +def _diag_indices_from(arr): + return (arr,) + + +@array_function_dispatch(_diag_indices_from) +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Get the indices of the diagonal elements. + + >>> di = np.diag_indices_from(a) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + >>> a[di] + array([ 0, 5, 10, 15]) + + This is simply syntactic sugar for diag_indices. + + >>> np.diag_indices(a.shape[0]) + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/phivenv/Lib/site-packages/numpy/lib/_index_tricks_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_index_tricks_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b2b6b202da572a39e62ef11f2c154e6106239651 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_index_tricks_impl.pyi @@ -0,0 +1,154 @@ +from collections.abc import Sequence +from typing import ( + Any, + TypeVar, + Generic, + overload, + Literal, + SupportsIndex, +) + +import numpy as np +from numpy import ( + # Circumvent a naming conflict with `AxisConcatenator.matrix` + matrix as _Matrix, + ndenumerate as ndenumerate, + ndindex as ndindex, + ndarray, + dtype, + str_, + bytes_, + int_, + float64, + complex128, +) +from numpy._typing import ( + # Arrays + ArrayLike, + _NestedSequence, + _FiniteNestedSequence, + NDArray, + + # DTypes + DTypeLike, + _SupportsDType, +) + +from numpy._core.multiarray import ( + unravel_index as unravel_index, + ravel_multi_index as ravel_multi_index, +) + +_T = TypeVar("_T") +_DType = TypeVar("_DType", bound=dtype[Any]) +_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) +_TupType = TypeVar("_TupType", bound=tuple[Any, ...]) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +__all__: list[str] + +@overload +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... 
+@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex128], ...]: ... + +class nd_grid(Generic[_BoolType]): + sparse: _BoolType + def __init__(self, sparse: _BoolType = ...) -> None: ... + @overload + def __getitem__( + self: nd_grid[Literal[False]], + key: slice | Sequence[slice], + ) -> NDArray[Any]: ... + @overload + def __getitem__( + self: nd_grid[Literal[True]], + key: slice | Sequence[slice], + ) -> tuple[NDArray[Any], ...]: ... + +class MGridClass(nd_grid[Literal[False]]): + def __init__(self) -> None: ... + +mgrid: MGridClass + +class OGridClass(nd_grid[Literal[True]]): + def __init__(self) -> None: ... + +ogrid: OGridClass + +class AxisConcatenator: + axis: int + matrix: bool + ndmin: int + trans1d: int + def __init__( + self, + axis: int = ..., + matrix: bool = ..., + ndmin: int = ..., + trans1d: int = ..., + ) -> None: ... + @staticmethod + @overload + def concatenate( # type: ignore[misc] + *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... + ) -> NDArray[Any]: ... + @staticmethod + @overload + def concatenate( + *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... + ) -> _ArrayType: ... + @staticmethod + def makemat( + data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... + ) -> _Matrix[Any, Any]: ... + + # TODO: Sort out this `__getitem__` method + def __getitem__(self, key: Any) -> Any: ... + +class RClass(AxisConcatenator): + axis: Literal[0] + matrix: Literal[False] + ndmin: Literal[1] + trans1d: Literal[-1] + def __init__(self) -> None: ... + +r_: RClass + +class CClass(AxisConcatenator): + axis: Literal[-1] + matrix: Literal[False] + ndmin: Literal[2] + trans1d: Literal[0] + def __init__(self) -> None: ... + +c_: CClass + +class IndexExpression(Generic[_BoolType]): + maketuple: _BoolType + def __init__(self, maketuple: _BoolType) -> None: ... + @overload + def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] + @overload + def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... + +index_exp: IndexExpression[Literal[True]] +s_: IndexExpression[Literal[False]] + +def fill_diagonal(a: NDArray[Any], val: Any, wrap: bool = ...) -> None: ... +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ... + +# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` diff --git a/phivenv/Lib/site-packages/numpy/lib/_iotools.py b/phivenv/Lib/site-packages/numpy/lib/_iotools.py new file mode 100644 index 0000000000000000000000000000000000000000..4ef600265debdbd3922bfd34199117dcf94bcf28 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_iotools.py @@ -0,0 +1,897 @@ +"""A collection of functions designed to help I/O with ascii files. + +""" +__docformat__ = "restructuredtext en" + +import numpy as np +import numpy._core.numeric as nx +from numpy._utils import asbytes, asunicode + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. 
That differs from the behavior of + np.compat.asunicode that decodes from 'ascii'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + encoding : str + Encoding used to decode `line`. + + Returns + ------- + decoded_line : str + + """ + if type(line) is bytes: + if encoding is None: + encoding = "latin1" + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + for name in ndtype.names or (): + if ndtype[name].names is not None: + return True + return False + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... ('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter: + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. 
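+
+            For example, a `method` returning ``[' a ', 'b ']`` is
+            wrapped into one returning ``['a', 'b']``.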
+ + """ + return lambda input: [_.strip() for _ in method(input)] + + def __init__(self, delimiter=None, comments='#', autostrip=True, + encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, str): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. """ + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator: + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. 
+ + Examples + -------- + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ('file_', 'field2', 'with_space', 'CaSe') + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + ... deletechars='q', + ... case_sensitive=False) + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') + + """ + + defaultexcludelist = ['return', 'file', 'print'] + defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = self.defaultdeletechars + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = 'unrecognized case_sensitive value %s.' % case_sensitive + raise ValueError(msg) + + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, str): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... + validatednames = [] + seen = dict() + nbempty = 0 + + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. 
+
+    Parameters
+    ----------
+    value : str
+        The string that is transformed to a boolean.
+
+    Returns
+    -------
+    boolval : bool
+        The boolean representation of `value`.
+
+    Raises
+    ------
+    ValueError
+        If the string is not 'True' or 'False' (case independent)
+
+    Examples
+    --------
+    >>> np.lib._iotools.str2bool('TRUE')
+    True
+    >>> np.lib._iotools.str2bool('false')
+    False
+
+    """
+    value = value.upper()
+    if value == 'TRUE':
+        return True
+    elif value == 'FALSE':
+        return False
+    else:
+        raise ValueError("Invalid boolean")
+
+
+class ConverterError(Exception):
+    """
+    Exception raised when an error occurs in a converter for string values.
+
+    """
+    pass
+
+
+class ConverterLockError(ConverterError):
+    """
+    Exception raised when an attempt is made to upgrade a locked converter.
+
+    """
+    pass
+
+
+class ConversionWarning(UserWarning):
+    """
+    Warning issued when a string converter has a problem.
+
+    Notes
+    -----
+    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
+    is explicitly suppressed with the "invalid_raise" keyword.
+
+    """
+    pass
+
+
+class StringConverter:
+    """
+    Factory class for functions transforming a string into another object
+    (int, float).
+
+    After initialization, an instance can be called to transform a string
+    into another object. If the string is recognized as representing a
+    missing value, a default value is returned.
+
+    Attributes
+    ----------
+    func : function
+        Function used for the conversion.
+    default : any
+        Default value to return when the input corresponds to a missing
+        value.
+    type : type
+        Type of the output.
+    _status : int
+        Integer representing the order of the conversion.
+    _mapper : sequence of tuples
+        Sequence of tuples (dtype, function, default value) to evaluate in
+        order.
+    _locked : bool
+        Holds `locked` parameter.
+
+    Parameters
+    ----------
+    dtype_or_func : {None, dtype, function}, optional
+        If a `dtype`, specifies the input data type, used to define a basic
+        function and a default value for missing data. For example, when
+        `dtype` is float, the `func` attribute is set to `float` and the
+        default value to `np.nan`. If a function, this function is used to
+        convert a string to another object. In this case, it is recommended
+        to give an associated default value as input.
+    default : any, optional
+        Value to return by default, that is, when the string to be
+        converted is flagged as missing. If not given, `StringConverter`
+        tries to supply a reasonable default value.
+    missing_values : {None, sequence of str}, optional
+        ``None`` or sequence of strings indicating a missing value. If ``None``
+        then missing values are indicated by empty entries. The default is
+        ``None``.
+    locked : bool, optional
+        Whether the StringConverter should be locked to prevent automatic
+        upgrade or not. Default is False.
+
+    """
+    _mapper = [(nx.bool, str2bool, False),
+               (nx.int_, int, -1),]
+
+    # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
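+    # (Illustration, not part of the upstream file: `_mapper` entries are
+    # tried in order, so e.g. StringConverter()('True') -> True via the
+    # (nx.bool, str2bool, False) entry, while StringConverter(nx.int_)('12')
+    # -> 12 via the (nx.int_, int, -1) entry. The append below only fires on
+    # builds where nx.int_ is a 4-byte integer.)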
+    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+        _mapper.append((nx.int64, int, -1))
+
+    _mapper.extend([(nx.float64, float, nx.nan),
+                    (nx.complex128, complex, nx.nan + 0j),
+                    (nx.longdouble, nx.longdouble, nx.nan),
+                    # If a non-default dtype is passed, fall back to generic
+                    # ones (should only be used for the converter)
+                    (nx.integer, int, -1),
+                    (nx.floating, float, nx.nan),
+                    (nx.complexfloating, complex, nx.nan + 0j),
+                    # Last, try with the string types (must be last, because
+                    # `_mapper[-1]` is used as default in some cases)
+                    (nx.str_, asunicode, '???'),
+                    (nx.bytes_, asbytes, '???'),
+                    ])
+
+    @classmethod
+    def _getdtype(cls, val):
+        """Returns the dtype of the input variable."""
+        return np.array(val).dtype
+
+    @classmethod
+    def _getsubdtype(cls, val):
+        """Returns the type of the dtype of the input variable."""
+        return np.array(val).dtype.type
+
+    @classmethod
+    def _dtypeortype(cls, dtype):
+        """Returns dtype for datetime64 and type of dtype otherwise."""
+
+        # This is a bit annoying. We want to return the "general" type in most
+        # cases (ie. "string" rather than "S10"), but we want to return the
+        # specific type for datetime64 (ie. "datetime64[us]" rather than
+        # "datetime64").
+        if dtype.type == np.datetime64:
+            return dtype
+        return dtype.type
+
+    @classmethod
+    def upgrade_mapper(cls, func, default=None):
+        """
+        Upgrade the mapper of a StringConverter by adding a new function and
+        its corresponding default.
+
+        The input function (or sequence of functions) and its associated
+        default value (if any) are inserted in penultimate position of the
+        mapper. The corresponding type is estimated from the dtype of the
+        default value.
+
+        Parameters
+        ----------
+        func : var
+            Function, or sequence of functions
+        default : any, optional
+            Default value associated with `func` (one per function when a
+            sequence of functions is given).
+
+        Examples
+        --------
+        >>> import dateutil.parser
+        >>> import datetime
+        >>> dateparser = dateutil.parser.parse
+        >>> defaultdate = datetime.date(2000, 1, 1)
+        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+        """
+        # Func is a single function
+        if hasattr(func, '__call__'):
+            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+            return
+        elif hasattr(func, '__iter__'):
+            if isinstance(func[0], (tuple, list)):
+                for _ in func:
+                    cls._mapper.insert(-1, _)
+                return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                # Pad the defaults so that there is one (possibly None)
+                # default per function
+                default.extend([None] * (len(func) - len(default)))
+            for fct, dft in zip(func, default):
+                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+    @classmethod
+    def _find_map_entry(cls, dtype):
+        # if a converter for the specific dtype is available use that
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if dtype.type == deftype:
+                return i, (deftype, func, default_def)
+
+        # otherwise find an inexact match
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if np.issubdtype(dtype.type, deftype):
+                return i, (deftype, func, default_def)
+
+        raise LookupError
+
+    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+                 locked=False):
+        # Defines a lock for upgrade
+        self._locked = bool(locked)
+        # No input dtype: minimal initialization
+        if dtype_or_func is None:
+            self.func = str2bool
+            self._status = 0
+            self.default = default or False
+            dtype = np.dtype('bool')
+        else:
+            # Is the input a np.dtype ?
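+            # (Note, not part of the upstream file: np.dtype() accepts plain
+            # Python types such as float or int as dtype specs, so those
+            # callables take the dtype branch; only an argument that is not a
+            # valid dtype spec raises TypeError and is treated as a
+            # conversion function.)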
+ try: + self.func = None + dtype = np.dtype(dtype_or_func) + except TypeError: + # dtype_or_func must be a function, then + if not hasattr(dtype_or_func, '__call__'): + errmsg = ("The input argument `dtype` is neither a" + " function nor a dtype (got '%s' instead)") + raise TypeError(errmsg % type(dtype_or_func)) + # Set the function + self.func = dtype_or_func + # If we don't have a default, try to guess it or set it to + # None + if default is None: + try: + default = self.func('0') + except ValueError: + default = None + dtype = self._getdtype(default) + + # find the best match in our mapper + try: + self._status, (_, func, default_def) = self._find_map_entry(dtype) + except LookupError: + # no match + self.default = default + _, func, _ = self._mapper[-1] + self._status = 0 + else: + # use the found default only if we did not already have one + if default is None: + self.default = default_def + else: + self.default = default + + # If the input was a dtype, set the function to the last we saw + if self.func is None: + self.func = func + + # If the status is 1 (int), change the function to + # something more robust. + if self.func == self._mapper[1][1]: + if issubclass(dtype.type, np.uint64): + self.func = np.uint64 + elif issubclass(dtype.type, np.int64): + self.func = np.int64 + else: + self.func = lambda x: int(float(x)) + # Store the list of strings corresponding to missing values. + if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, str): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError("Cannot convert string '%s'" % value) + + def __call__(self, value): + return self._callingfunction(value) + + def _do_upgrade(self): + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + self.type, self.func, default = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. 
First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + self._do_upgrade() + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + self._do_upgrade() + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. If ``None``, then + the existing `missing_values` are cleared. The default is ``''``. + locked : bool, optional + Whether the StringConverter should be locked to prevent + automatic upgrade or not. Default is False. + + Notes + ----- + `update` takes the same parameters as the constructor of + `StringConverter`, except that `func` does not accept a `dtype` + whereas `dtype_or_func` in the constructor does. + + """ + self.func = func + self._locked = locked + + # Don't reset the default to None if we can avoid it + if default is not None: + self.default = default + self.type = self._dtypeortype(self._getdtype(default)) + else: + try: + tester = func(testing_value or '1') + except (TypeError, ValueError): + tester = None + self.type = self._dtypeortype(self._getdtype(tester)) + + # Add the missing values to the existing set or clear it. + if missing_values is None: + # Clear all missing values even though the ctor initializes it to + # set(['']) when the argument is None. + self.missing_values = set() + else: + if not np.iterable(missing_values): + missing_values = [missing_values] + if not all(isinstance(v, str) for v in missing_values): + raise TypeError("missing_values must be strings or unicode") + self.missing_values.update(missing_values) + + +def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): + """ + Convenience function to create a `np.dtype` object. + + The function processes the input `dtype` and matches it with the given + names. + + Parameters + ---------- + ndtype : var + Definition of the dtype. Can be any string or dictionary recognized + by the `np.dtype` function, or a sequence of types. + names : str or sequence, optional + Sequence of strings to use as field names for a structured dtype. + For convenience, `names` can be a string of a comma-separated list + of names. + defaultfmt : str, optional + Format string used to define missing names, such as ``"f%i"`` + (default) or ``"fields_%02i"``. 
+    validationargs : optional
+        A series of optional arguments used to initialize a
+        `NameValidator`.
+
+    Examples
+    --------
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmin(a)
+    1.0
+    >>> np.nanmin(a, axis=0)
+    array([1., 2.])
+    >>> np.nanmin(a, axis=1)
+    array([1., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmin([1, 2, np.nan, np.inf])
+    1.0
+    >>> np.nanmin([1, 2, np.nan, -np.inf])
+    -inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if type(a) is np.ndarray and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
+        res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, +np.inf)
+        res = np.amin(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+                       initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+           where=np._NoValue):
+    """
+    Return the maximum of an array or maximum along an axis, ignoring any
+    NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
+    raised and NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose maximum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the maximum is computed. The default is to compute
+        the maximum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `max` method
+        of sub-classes of `ndarray`. If the sub-classes methods
+        does not implement `keepdims` any exceptions will be raised.
+
+        .. versionadded:: 1.8.0
+    initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmax : ndarray
+        An array with the same shape as `a`, with the specified axis removed.
+        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+        returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    fmax :
+        Element-wise maximum of two arrays, ignoring any NaNs.
+    maximum :
+        Element-wise maximum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite:
+        Shows which elements are neither NaN nor infinity.
+
+    amin, fmin, minimum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.max.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmax(a)
+    3.0
+    >>> np.nanmax(a, axis=0)
+    array([3., 2.])
+    >>> np.nanmax(a, axis=1)
+    array([2., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmax([1, 2, np.nan, -np.inf])
+    2.0
+    >>> np.nanmax([1, 2, np.nan, np.inf])
+    inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if type(a) is np.ndarray and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
+        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, -np.inf)
+        res = np.amax(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
+    return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Return the indices of the minimum values in the specified axis ignoring
+    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
+    cannot be trusted if a slice contains only NaNs and Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate. By default flattened input is used.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+        .. versionadded:: 1.22.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        ..
versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmin(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmax(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + + .. versionadded:: 1.8.0 + out : ndarray, optional + Alternate output array in which to place the result. 
The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nansum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.sum : Sum across array propagating NaNs. + isnan : Show which elements are NaN. + isfinite : Show which elements are not NaN or +/-inf. + + Notes + ----- + If both positive and negative infinity are present, the sum will be Not + A Number (NaN). + + Examples + -------- + >>> np.nansum(1) + 1 + >>> np.nansum([1]) + 1 + >>> np.nansum([1, np.nan]) + 1.0 + >>> a = np.array([[1, 1], [1, np.nan]]) + >>> np.nansum(a) + 3.0 + >>> np.nansum(a, axis=0) + array([2., 1.]) + >>> np.nansum([1, np.nan, np.inf]) + inf + >>> np.nansum([1, np.nan, -np.inf]) + -inf + >>> from numpy.testing import suppress_warnings + >>> with np.errstate(invalid="ignore"): + ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present + np.float64(nan) + + """ + a, mask = _replace_nan(a, 0) + return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanprod_dispatcher) +def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the product of array elements over a given axis treating Not a + Numbers (NaNs) as ones. + + One is returned for slices that are all-NaN or empty. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose product is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the product is computed. The default is to compute + the product of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. 
If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + keepdims : bool, optional + If True, the axes which are reduced are left in the result as + dimensions with size one. With this option, the result will + broadcast correctly against the original `arr`. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.prod : Product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nanprod(1) + 1 + >>> np.nanprod([1]) + 1 + >>> np.nanprod([1, np.nan]) + 1.0 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + 6.0 + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + a, mask = _replace_nan(a, 1) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumsum_dispatcher) +def nancumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are + encountered and leading NaNs are replaced by zeros. + + Zeros are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + nancumsum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.cumsum : Cumulative sum across array propagating NaNs. + isnan : Show which elements are NaN. 
+ + Examples + -------- + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[1., 2.], + [4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[1., 3.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[1., 2.], + [3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[1., 2.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + *, where=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. 
For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([2., 4.]) + >>> np.nanmean(a, axis=1) + array([1., 3.5]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims, + where=where) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. + See nanmedian for parameter usage + """ + arr1d_parsed, _, overwrite_input = _remove_nan_1d( + arr1d, overwrite_input=overwrite_input, + ) + + if arr1d_parsed.size == 0: + # Ensure that a nan-esque scalar of the appropriate type (and unit) + # is returned for `timedelta64` and `complexfloating` + return arr1d[-1] + + return np.median(arr1d_parsed, overwrite_input=overwrite_input) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. 
+ These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + # benchmarked with shuffled (50, 50, x) containing a few NaN + if a.shape[axis] < 600: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=5) + + fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan + if out is not None: + out[...] = m.filled(fill_value) + return out + return m.filled(fill_value) + + +def _nanmedian_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmedian_dispatcher) +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. 
+ + See Also + -------- + mean, median, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two + middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) + >>> a[0, 1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.median(a) + np.float64(nan) + >>> np.nanmedian(a) + 3.0 + >>> np.nanmedian(a, axis=0) + array([6.5, 2. , 2.5]) + >>> np.median(a, axis=1) + array([nan, 2.]) + >>> b = a.copy() + >>> np.nanmedian(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.nanmedian(b, axis=None, overwrite_input=True) + 3.0 + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + # apply_along_axis in _nanmedian doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + return fnb._ureduce(a, func=_nanmedian, keepdims=keepdims, + axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _nanpercentile_dispatcher( + a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanpercentile_dispatcher) +def nanpercentile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, + interpolation=None, +): + """ + Compute the qth percentile of the data along the specified axis, + while ignoring nan values. + + Returns the qth percentile(s) of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored. + q : array_like of float + Percentile or sequence of percentiles to compute, which must be + between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The default + is to compute the percentile(s) along a flattened version of the + array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. 
+ + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, except q in range [0, 1]. + + Notes + ----- + For more information please see `numpy.percentile` + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + np.float64(nan) + >>> np.nanpercentile(a, 50) + 3.0 + >>> np.nanpercentile(a, 50, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = fnb._check_interpolation_as_method( + method, interpolation, "nanpercentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100) + # undo any decay that the ufunc performed (see gh-13105) + q = np.asanyarray(q) + if not fnb._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. 
" + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, + interpolation=None, +): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Probability or sequence of probabilities for the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. 
+ If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. + + Notes + ----- + For more information please see `numpy.quantile` + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + np.float64(nan) + >>> np.nanquantile(a, 0.5) + 3.0 + >>> np.nanquantile(a, 0.5, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + + if interpolation is not None: + method = fnb._check_interpolation_as_method( + method, interpolation, "nanquantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float). + if isinstance(q, (int, float)) and a.dtype.kind == "f": + q = np.asanyarray(q, dtype=a.dtype) + else: + q = np.asanyarray(q) + + if not fnb._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. 
" + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _nanquantile_unchecked( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + weights=None, +): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + return fnb._ureduce(a, + func=_nanquantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _nanquantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int = None, + out=None, + overwrite_input: bool = False, + method="linear", +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + wgt = None if weights is None else weights.ravel() + result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) + else: + # Note that this code could try to fill in `out` right away + if weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d( + arr1d, q, overwrite_input=False, method="linear", weights=None, +): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? 
+ arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) + if arr1d.size == 0: + # convert to scalar + return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] + + return fnb._quantile_unchecked( + arr1d, + q, + overwrite_input=overwrite_input, + method=method, + weights=weights, + ) + + +def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this var function. + + .. versionadded:: 1.26.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. 
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + For this function to work on sub-classes of ndarray, they must define + `sum` with the kwarg `keepdims` + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + 1.5555555555555554 + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + # Compute mean + if type(arr) is np.matrix: + _keepdims = np._NoValue + else: + _keepdims = True + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims, + where=where) + + if mean is not np._NoValue: + avg = mean + else: + # we need to special case matrix for reverse compatibility + # in order for this to work, these sums need to be called with + # keepdims=True, however matrix now raises an error in this case, but + # the reason that it drops the keepdims kwarg is to force keepdims=True + # so this used to work by serendipity. + avg = np.sum(arr, axis=axis, dtype=dtype, + keepdims=_keepdims, where=where) + avg = _divide_by_count(avg, cnt) + + # Compute squared deviation from mean. + np.subtract(arr, avg, out=arr, casting='unsafe', where=where) + arr = _copyto(arr, 0, mask) + if issubclass(arr.dtype.type, np.complexfloating): + sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real + else: + sqr = np.multiply(arr, arr, out=arr, where=where) + + # Compute variance. + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + # Precaution against reduced object arrays + try: + var_ndim = var.ndim + except AttributeError: + var_ndim = np.ndim(var) + if var_ndim < cnt.ndim: + # Subclasses of ndarray may ignore keepdims, so check here. + cnt = cnt.squeeze(axis) + dof = cnt - ddof + var = _divide_by_count(var, dof) + + isbad = (dof <= 0) + if np.any(isbad): + warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, + stacklevel=2) + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. 
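+        # Illustrative example of this branch: np.nanvar([np.nan, np.nan])
+        # leaves zero non-NaN elements, so dof <= 0, the RuntimeWarning
+        # above is emitted, and the affected entries are replaced with nan
+        # just below.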
+ var = _copyto(var, np.nan, isbad) + return var + + +def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanstd_dispatcher) +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the standard deviation along the specified axis, while + ignoring NaNs. + + Returns the standard deviation, a measure of the spread of a + distribution, of the non-NaN array elements. The standard deviation is + computed for the flattened array by default, otherwise over the + specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Calculate the standard deviation of the non-NaN values. + axis : {int, tuple of int, None}, optional + Axis or axes along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it + is the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 1.26.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. 
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+    instead. In standard statistical practice, ``ddof=1`` provides an
+    unbiased estimator of the variance of the infinite population.
+    ``ddof=0`` provides a maximum likelihood estimate of the variance for
+    normally distributed variables. The standard deviation computed in this
+    function is the square root of the estimated variance, so even with
+    ``ddof=1``, it will not be an unbiased estimate of the standard
+    deviation per se.
+
+    Note that, for complex numbers, `std` takes the absolute value before
+    squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the *std* is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for float32 (see example
+    below). Specifying a higher-accuracy accumulator using the `dtype`
+    keyword can alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, np.nan], [3, 4]])
+    >>> np.nanstd(a)
+    1.247219128924647
+    >>> np.nanstd(a, axis=0)
+    array([1., 0.])
+    >>> np.nanstd(a, axis=1)
+    array([0., 0.5]) # may vary
+
+    """
+    var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                 keepdims=keepdims, where=where, mean=mean,
+                 correction=correction)
+    if isinstance(var, np.ndarray):
+        std = np.sqrt(var, out=var)
+    elif hasattr(var, 'dtype'):
+        std = var.dtype.type(np.sqrt(var))
+    else:
+        std = np.sqrt(var)
+    return std
diff --git a/phivenv/Lib/site-packages/numpy/lib/_nanfunctions_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_nanfunctions_impl.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..603368ec5b23bcf667795cf61c90c4dd1f36037b
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/_nanfunctions_impl.pyi
@@ -0,0 +1,38 @@
+from numpy._core.fromnumeric import (
+    amin,
+    amax,
+    argmin,
+    argmax,
+    sum,
+    prod,
+    cumsum,
+    cumprod,
+    mean,
+    var,
+    std
+)
+
+from numpy.lib._function_base_impl import (
+    median,
+    percentile,
+    quantile,
+)
+
+__all__: list[str]
+
+# NOTE: In reality these functions are not aliases but distinct functions
+# with identical signatures.
+nanmin = amin
+nanmax = amax
+nanargmin = argmin
+nanargmax = argmax
+nansum = sum
+nanprod = prod
+nancumsum = cumsum
+nancumprod = cumprod
+nanmean = mean
+nanvar = var
+nanstd = std
+nanmedian = median
+nanpercentile = percentile
+nanquantile = quantile
diff --git a/phivenv/Lib/site-packages/numpy/lib/_npyio_impl.py b/phivenv/Lib/site-packages/numpy/lib/_npyio_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..58afa9fbb1b9119d0a5541590bee72a4b5e66508
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/_npyio_impl.py
@@ -0,0 +1,2594 @@
+"""
+IO related functions.
+"""
+import os
+import re
+import functools
+import itertools
+import warnings
+import weakref
+import contextlib
+import operator
+from operator import itemgetter, index as opindex, methodcaller
+from collections.abc import Mapping
+import pickle
+
+import numpy as np
+from .
import format +from ._datasource import DataSource +from numpy._core import overrides +from numpy._core.multiarray import packbits, unpackbits +from numpy._core._multiarray_umath import _load_from_filelike +from numpy._core.overrides import set_array_function_like_doc, set_module +from ._iotools import ( + LineSplitter, NameValidator, StringConverter, ConverterError, + ConverterLockError, ConversionWarning, _is_string_like, + has_nested_fields, flatten_dtype, easy_dtype, _decode_line + ) +from numpy._utils import asunicode, asbytes + + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +class BagObj: + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> from numpy.lib._npyio_impl import BagObj as BO + >>> class BagDemo: + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) from None + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. + """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os.fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +@set_module('numpy.lib.npyio') +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + allow_pickle : bool, optional + Allow loading pickled data. Default: False + + .. 
versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + pickle_kwargs : dict, optional + Additional keyword arguments to pass on to pickle.load. + These are only useful when loading object arrays saved on + Python 2 when using Python 3. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Parameters + ---------- + fid : file, str, or pathlib.Path + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.npyio.NpzFile) + True + >>> npz + NpzFile 'object' with keys: x, y + >>> sorted(npz.files) + ['x', 'y'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + # Make __exit__ safe if zipfile_factory raises an exception + zip = None + fid = None + _MAX_REPR_ARRAY_COUNT = 5 + + def __init__(self, fid, own_fid=False, allow_pickle=False, + pickle_kwargs=None, *, + max_header_size=format._MAX_HEADER_SIZE): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + self._files = _zip.namelist() + self.files = [] + self.allow_pickle = allow_pickle + self.max_header_size = max_header_size + self.pickle_kwargs = pickle_kwargs + for x in self._files: + if x.endswith('.npy'): + self.files.append(x[:-4]) + else: + self.files.append(x) + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. + + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + # Implement the Mapping ABC + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + def __getitem__(self, key): + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. 
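+        # Summary of the lookup below: a key may be stored either verbatim
+        # or with an implicit '.npy' suffix; members that start with the
+        # NumPy magic prefix are parsed as arrays via format.read_array,
+        # anything else is returned as raw bytes from the archive.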
+ member = False + if key in self._files: + member = True + elif key in self.files: + member = True + key += '.npy' + if member: + bytes = self.zip.open(key) + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.close() + if magic == format.MAGIC_PREFIX: + bytes = self.zip.open(key) + return format.read_array(bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size) + else: + return self.zip.read(key) + else: + raise KeyError(f"{key} is not a file in the archive") + + def __contains__(self, key): + return (key in self._files or key in self.files) + + def __repr__(self): + # Get filename or default to `object` + if isinstance(self.fid, str): + filename = self.fid + else: + filename = getattr(self.fid, "name", "object") + + # Get the name of arrays + array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT]) + if len(self.files) > self._MAX_REPR_ARRAY_COUNT: + array_names += "..." + return f"NpzFile {filename!r} with keys: {array_names}" + + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. + """ + return Mapping.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return Mapping.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return Mapping.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return Mapping.values(self) + + +@set_module('numpy') +def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, + encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + .. warning:: Loading files that contain object arrays uses the ``pickle`` + module, which is not secure against erroneous or maliciously + constructed data. Consider passing ``allow_pickle=False`` to + load data that is known not to contain object arrays for the + safer handling of untrusted sources. + + Parameters + ---------- + file : file-like object, string, or pathlib.Path + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods and must always + be opened in binary mode. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + allow_pickle : bool, optional + Allow loading pickled object arrays stored in npy files. Reasons for + disallowing pickles include security, as loading pickled data can + execute arbitrary code. If pickles are disallowed, loading object + arrays will fail. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + fix_imports : bool, optional + Only useful when loading Python 2 generated pickled files on Python 3, + which includes npy/npz files containing object arrays. 
If `fix_imports` + is True, pickle will try to map the old Python 2 names to the new names + used in Python 3. + encoding : str, optional + What encoding to use when reading Python 2 strings. Only useful when + loading Python 2 generated pickled files in Python 3, which includes + npy/npz files containing object arrays. Values other than 'latin1', + 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical + data. Default: 'ASCII' + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. + UnpicklingError + If ``allow_pickle=True``, but the file cannot be loaded as a pickle. + ValueError + The file contains an object array, but ``allow_pickle=False`` given. + EOFError + When calling ``np.load`` multiple times on the same file handle, + if all data has already been read + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. + - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy._core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. 
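+        # For illustration: np.load(f, encoding='utf-8') is rejected by the
+        # check below, while the safe values 'ASCII' (the default),
+        # 'latin1' and 'bytes' are accepted.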
+ raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) + + with contextlib.ExitStack() as stack: + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = stack.enter_context(open(os.fspath(file), "rb")) + own_fid = True + + # Code to distinguish from NumPy binary files and pickles. + _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + if not magic: + raise EOFError("No data left in file") + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): + # zip-file (assume .npz) + # Potentially transfer file ownership to NpzFile + stack.pop_all() + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + if allow_pickle: + max_header_size = 2**64 + return format.open_memmap(file, mode=mmap_mode, + max_header_size=max_header_size) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + else: + # Try a pickle + if not allow_pickle: + raise ValueError("Cannot load file containing pickled data " + "when allow_pickle=False") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception as e: + raise pickle.UnpicklingError( + f"Failed to interpret file {file!r} as a pickle") from e + + +def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True, fix_imports=True): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, + a ``.npy`` extension will be appended to the filename if it does not + already have one. + arr : array_like + Array data to be saved. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + fix_imports : bool, optional + Only useful in forcing objects in object arrays on Python 3 to be + pickled in a Python 2 compatible way. If `fix_imports` is True, pickle + will try to map the new Python 3 names to the old module names used in + Python 2, so that the pickle data stream is readable with Python 2. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Any data saved to the file is appended to the end of the file. 
+ + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + + >>> with open('test.npy', 'wb') as f: + ... np.save(f, np.array([1, 2])) + ... np.save(f, np.array([1, 3])) + >>> with open('test.npy', 'rb') as f: + ... a = np.load(f) + ... b = np.load(f) + >>> print(a, b) + # [1 2] [1 3] + """ + if hasattr(file, 'write'): + file_ctx = contextlib.nullcontext(file) + else: + file = os.fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + file_ctx = open(file, "wb") + + with file_ctx as fid: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle, + pickle_kwargs=dict(fix_imports=fix_imports)) + + +def _savez_dispatcher(file, *args, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, **kwds): + """Save several arrays into a single file in uncompressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Keys passed in `kwds` are used as filenames inside the ZIP archive. + Therefore, keys should be valid filenames; e.g., avoid keys that begin with + ``/`` or contain ``.``. + + When naming variables with keyword arguments, it is not possible to name a + variable ``file``, as this would cause the ``file`` argument to be defined + twice in the call to ``savez``. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. 
+ + >>> np.savez(outfile, x, y) + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_0', 'arr_1'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> sorted(npzfile.files) + ['x', 'y'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False) + + +def _savez_compressed_dispatcher(file, *args, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., + ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Examples + -------- + >>> test_array = np.random.rand(3, 2) + >>> test_vector = np.random.rand(4) + >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) + >>> loaded = np.load('/tmp/123.npz') + >>> print(np.array_equal(test_array, loaded['a'])) + True + >>> print(np.array_equal(test_vector, loaded['b'])) + True + + """ + _savez(file, args, kwds, True) + + +def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): + # Import is postponed to here since zipfile depends on gzip, an optional + # component of the so-called standard library. 
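+    # Overview of the steps below: positional arrays are assigned the
+    # member names 'arr_0.npy', 'arr_1.npy', ..., keyword arrays keep their
+    # own names, and each array is serialised into the zip archive with
+    # format.write_array.  For example (illustrative),
+    #     np.savez('out.npz', x, y=y)
+    # produces an archive with members 'arr_0.npy' and 'y.npy'.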
+    import zipfile
+
+    if not hasattr(file, 'write'):
+        file = os.fspath(file)
+        if not file.endswith('.npz'):
+            file = file + '.npz'
+
+    namedict = kwds
+    for i, val in enumerate(args):
+        key = 'arr_%d' % i
+        if key in namedict.keys():
+            raise ValueError(
+                "Cannot use un-named variables and keyword %s" % key)
+        namedict[key] = val
+
+    if compress:
+        compression = zipfile.ZIP_DEFLATED
+    else:
+        compression = zipfile.ZIP_STORED
+
+    zipf = zipfile_factory(file, mode="w", compression=compression)
+
+    for key, val in namedict.items():
+        fname = key + '.npy'
+        val = np.asanyarray(val)
+        # always force zip64, gh-10776
+        with zipf.open(fname, 'w', force_zip64=True) as fid:
+            format.write_array(fid, val,
+                               allow_pickle=allow_pickle,
+                               pickle_kwargs=pickle_kwargs)
+
+    zipf.close()
+
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+    """Just checks if the param ndmin is supported on
+    _ensure_ndmin_ndarray. It is intended to be used as
+    verification before running anything expensive.
+    e.g. loadtxt, genfromtxt
+    """
+    # Check correctness of the values of `ndmin`
+    if ndmin not in [0, 1, 2]:
+        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+    """This is a helper function of loadtxt and genfromtxt to ensure
+    proper minimum dimension as requested
+
+    ndmin : int. Supported values 0, 1, 2
+    ^^ whenever this changes, keep in sync with
+       _ensure_ndmin_ndarray_check_param
+    """
+    # Verify that the array has at least dimensions `ndmin`.
+    # Tweak the size and shape of the arrays - remove extraneous dimensions
+    if a.ndim > ndmin:
+        a = np.squeeze(a)
+    # and ensure we have the minimum number of dimensions asked for
+    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+    if a.ndim < ndmin:
+        if ndmin == 1:
+            a = np.atleast_1d(a)
+        elif ndmin == 2:
+            a = np.atleast_2d(a).T
+
+    return a
+
+
+# number of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+def _check_nonneg_int(value, name="argument"):
+    try:
+        operator.index(value)
+    except TypeError:
+        raise TypeError(f"{name} must be an integer") from None
+    if value < 0:
+        raise ValueError(f"{name} must be nonnegative")
+
+
+def _preprocess_comments(iterable, comments, encoding):
+    """
+    Generator that consumes an iterable of lines and strips the multiple
+    (or multi-character) comments from those lines.
+    This is a pre-processing step to achieve feature parity with loadtxt
+    (we assume that this feature is a niche feature).
+    """
+    for line in iterable:
+        if isinstance(line, bytes):
+            # Need to handle conversion here, or the splitting would fail
+            line = line.decode(encoding)
+
+        for c in comments:
+            line = line.split(c, 1)[0]
+
+        yield line
+
+
+# The number of rows we read in one go if confronted with a parametric dtype
+_loadtxt_chunksize = 50000
+
+
+def _read(fname, *, delimiter=',', comment='#', quote='"',
+          imaginary_unit='j', usecols=None, skiplines=0,
+          max_rows=None, converters=None, ndmin=None, unpack=False,
+          dtype=np.float64, encoding=None):
+    r"""
+    Read a NumPy array from a text file.
+    This is a helper function for loadtxt.
+
+    Parameters
+    ----------
+    fname : file, str, or pathlib.Path
+        The filename or the file to be read.
+    delimiter : str, optional
+        Field delimiter of the fields in a line of the file.
+        Default is a comma, ','. If None, any sequence of whitespace is
+        considered a delimiter.
+    comment : str or sequence of str or None, optional
+        Character that begins a comment.
All text from the comment + character to the end of the line is ignored. + Multiple comments or multiple-character comment strings are supported, + but may be slower and `quote` must be empty if used. + Use None to disable all use of comments. + quote : str or None, optional + Character that is used to quote string fields. Default is '"' + (a double quote). Use None to disable quote support. + imaginary_unit : str, optional + Character that represent the imaginary unit `sqrt(-1)`. + Default is 'j'. + usecols : array_like, optional + A one-dimensional array of integer column numbers. These are the + columns from the file to be included in the array. If this value + is not given, all the columns are used. + skiplines : int, optional + Number of lines to skip before interpreting the data in the file. + max_rows : int, optional + Maximum number of rows of data to read. Default is to read the + entire file. + converters : dict or callable, optional + A function to parse all columns strings into the desired value, or + a dictionary mapping column number to a parser function. + E.g. if column 0 is a date string: ``converters = {0: datestr2num}``. + Converters can also be used to provide a default value for missing + data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will + convert empty fields to 0. + Default: None + ndmin : int, optional + Minimum dimension of the array returned. + Allowed values are 0, 1 or 2. Default is 0. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = read(...)``. When used with a structured + data-type, arrays are returned for each field. Default is False. + dtype : numpy data type + A NumPy dtype instance, can be a structured dtype to map to the + columns of the file. + encoding : str, optional + Encoding used to decode the inputfile. The special value 'bytes' + (the default) enables backwards-compatible behavior for `converters`, + ensuring that inputs to the converter functions are encoded + bytes objects. The special value 'bytes' has no additional effect if + ``converters=None``. If encoding is ``'bytes'`` or ``None``, the + default system encoding is used. + + Returns + ------- + ndarray + NumPy array. + """ + # Handle special 'bytes' keyword for encoding + byte_converters = False + if encoding == 'bytes': + encoding = None + byte_converters = True + + if dtype is None: + raise TypeError("a dtype must be provided.") + dtype = np.dtype(dtype) + + read_dtype_via_object_chunks = None + if dtype.kind in 'SUM' and ( + dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'): + # This is a legacy "flexible" dtype. We do not truly support + # parametric dtypes currently (no dtype discovery step in the core), + # but have to support these for backward compatibility. + read_dtype_via_object_chunks = dtype + dtype = np.dtype(object) + + if usecols is not None: + # Allow usecols to be a single int or a sequence of ints, the C-code + # handles the rest + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols] + + _ensure_ndmin_ndarray_check_param(ndmin) + + if comment is None: + comments = None + else: + # assume comments are a sequence of strings + if "" in comment: + raise ValueError( + "comments cannot be an empty string. Use comments=None to " + "disable comments." 
+ ) + comments = tuple(comment) + comment = None + if len(comments) == 0: + comments = None # No comments at all + elif len(comments) == 1: + # If there is only one comment, and that comment has one character, + # the normal parsing can deal with it just fine. + if isinstance(comments[0], str) and len(comments[0]) == 1: + comment = comments[0] + comments = None + else: + # Input validation if there are multiple comment characters + if delimiter in comments: + raise TypeError( + f"Comment characters '{comments}' cannot include the " + f"delimiter '{delimiter}'" + ) + + # comment is now either a 1 or 0 character string or a tuple: + if comments is not None: + # Note: An earlier version support two character comments (and could + # have been extended to multiple characters, we assume this is + # rare enough to not optimize for. + if quote is not None: + raise ValueError( + "when multiple comments or a multi-character comment is " + "given, quotes are not supported. In this case quotechar " + "must be set to None.") + + if len(imaginary_unit) != 1: + raise ValueError('len(imaginary_unit) must be 1.') + + _check_nonneg_int(skiplines) + if max_rows is not None: + _check_nonneg_int(max_rows) + else: + # Passing -1 to the C code means "read the entire file". + max_rows = -1 + + fh_closing_ctx = contextlib.nullcontext() + filelike = False + try: + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if isinstance(fname, str): + fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) + if encoding is None: + encoding = getattr(fh, 'encoding', 'latin1') + + fh_closing_ctx = contextlib.closing(fh) + data = fh + filelike = True + else: + if encoding is None: + encoding = getattr(fname, 'encoding', 'latin1') + data = iter(fname) + except TypeError as e: + raise ValueError( + f"fname must be a string, filehandle, list of strings,\n" + f"or generator. Got {type(fname)} instead.") from e + + with fh_closing_ctx: + if comments is not None: + if filelike: + data = iter(data) + filelike = False + data = _preprocess_comments(data, comments, encoding) + + if read_dtype_via_object_chunks is None: + arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=max_rows, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters) + + else: + # This branch reads the file into chunks of object arrays and then + # casts them to the desired actual dtype. This ensures correct + # string-length and datetime-unit discovery (like `arr.astype()`). + # Due to chunking, certain error reports are less clear, currently. + if filelike: + data = iter(data) # cannot chunk when reading from file + filelike = False + + c_byte_converters = False + if read_dtype_via_object_chunks == "S": + c_byte_converters = True # Use latin1 rather than ascii + + chunks = [] + while max_rows != 0: + if max_rows < 0: + chunk_size = _loadtxt_chunksize + else: + chunk_size = min(_loadtxt_chunksize, max_rows) + + next_arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters, + c_byte_converters=c_byte_converters) + # Cast here already. We hope that this is better even for + # large files because the storage is more compact. 
It could + # be adapted (in principle the concatenate could cast). + chunks.append(next_arr.astype(read_dtype_via_object_chunks)) + + skiprows = 0 # Only have to skip for first chunk + if max_rows >= 0: + max_rows -= chunk_size + if len(next_arr) < chunk_size: + # There was less data than requested, so we are done. + break + + # Need at least one chunk, but if empty, the last one may have + # the wrong shape. + if len(chunks) > 1 and len(chunks[-1]) == 0: + del chunks[-1] + if len(chunks) == 1: + arr = chunks[0] + else: + arr = np.concatenate(chunks, axis=0) + + # NOTE: ndmin works as advertised for structured dtypes, but normally + # these would return a 1D result plus the structured dimension, + # so ndmin=2 adds a third dimension even when no squeezing occurs. + # A `squeeze=False` could be a better solution (pandas uses squeeze). + arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin) + + if arr.shape: + if arr.shape[0] == 0: + warnings.warn( + f'loadtxt: input contained no data: "{fname}"', + category=UserWarning, + stacklevel=3 + ) + + if unpack: + # Unpack structured dtypes if requested: + dt = arr.dtype + if dt.names is not None: + # For structured arrays, return an array for each field. + return [arr[field] for field in dt.names] + else: + return arr.T + else: + return arr + + +@set_array_function_like_doc +@set_module('numpy') +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0, encoding=None, max_rows=None, *, quotechar=None, + like=None): + r""" + Load data from a text file. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. The strings + in a list or produced by a generator are treated as lines. + dtype : data-type, optional + Data-type of the resulting array; default: float. If this is a + structured data-type, the resulting array will be 1-dimensional, and + each row will be interpreted as an element of the array. In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str or sequence of str or None, optional + The characters or list of characters used to indicate the start of a + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. + delimiter : str, optional + The character used to separate the values. For backwards compatibility, + byte strings will be decoded as 'latin1'. The default is whitespace. + + .. versionchanged:: 1.23.0 + Only single character delimiters are supported. Newline characters + cannot be used as the delimiter. + + converters : dict or callable, optional + Converter functions to customize value parsing. If `converters` is + callable, the function is applied to all columns, else it must be a + dict that maps column number to a parser function. + See examples for further details. + Default: None. + + .. versionchanged:: 1.23.0 + The ability to pass a single callable to be applied to all columns + was added. + + skiprows : int, optional + Skip the first `skiprows` lines, including comments; default: 0. + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + The default, None, results in all columns being read. + + .. 
versionchanged:: 1.11.0 + When a single column has to be read it is possible to use + an integer instead of a tuple. E.g ``usecols = 3`` reads the + fourth column the same way as ``usecols = (3,)`` would. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + ndmin : int, optional + The returned array will have at least `ndmin` dimensions. + Otherwise mono-dimensional axes will be squeezed. + Legal values: 0 (default), 1 or 2. + + .. versionadded:: 1.6.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensures you receive byte arrays as results if possible and passes + 'latin1' encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is 'bytes'. + + .. versionadded:: 1.14.0 + .. versionchanged:: 2.0 + Before NumPy 2, the default was ``'bytes'`` for Python 2 + compatibility. The default is now ``None``. + + max_rows : int, optional + Read `max_rows` rows of content after `skiprows` lines. The default is + to read all the rows. Note that empty rows containing no data such as + empty lines and comment lines are not counted towards `max_rows`, + while such lines are counted in `skiprows`. + + .. versionadded:: 1.16.0 + + .. versionchanged:: 1.23.0 + Lines containing no data, including comment lines (e.g., lines + starting with '#' or as specified via `comments`) are not counted + towards `max_rows`. + quotechar : unicode character or None, optional + The character used to denote the start and end of a quoted item. + Occurrences of the delimiter or comment characters are ignored within + a quoted item. The default value is ``quotechar=None``, which means + quoting support is disabled. + + If two consecutive instances of `quotechar` are found within a quoted + field, the first is treated as an escape character. See examples. + + .. versionadded:: 1.23.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Data read from the text file. + + See Also + -------- + load, fromstring, fromregex + genfromtxt : Load data with missing values handled as specified. + scipy.io.loadmat : reads MATLAB data files + + Notes + ----- + This function aims to be a fast reader for simply formatted files. The + `genfromtxt` function provides more sophisticated handling of, e.g., + lines with missing values. + + Each row in the input text file must have the same number of values to be + able to read all values. If all rows do not have same number of values, a + subset of up to n columns (where n is the least number of values present + in all rows) can be read by specifying the columns via `usecols`. + + .. versionadded:: 1.10.0 + + The strings produced by the Python float.hex method can be used as + input for floats. + + Examples + -------- + >>> from io import StringIO # StringIO behaves like a file object + >>> c = StringIO("0 1\n2 3") + >>> np.loadtxt(c) + array([[0., 1.], + [2., 3.]]) + + >>> d = StringIO("M 21 72\nF 35 58") + >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), + ... 
'formats': ('S1', 'i4', 'f4')})
+    array([(b'M', 21, 72.), (b'F', 35, 58.)],
+          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO("1,0,2\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([1., 3.])
+    >>> y
+    array([2., 4.])
+
+    The `converters` argument is used to specify functions to preprocess the
+    text prior to parsing. `converters` can be a dictionary that maps
+    preprocessing functions to each column:
+
+    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+    >>> conv = {
+    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+    ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
+    ... }
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([[1., 3.],
+           [3., 5.]])
+
+    `converters` can be a callable instead of a dictionary, in which case it
+    is applied to all columns:
+
+    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+    >>> import functools
+    >>> conv = functools.partial(int, base=16)
+    >>> np.loadtxt(s, converters=conv)
+    array([[222., 173.],
+           [192., 222.]])
+
+    This example shows how `converters` can be used to convert a field
+    with a trailing minus sign into a negative number.
+
+    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+    >>> def conv(fld):
+    ...     return -float(fld[:-1]) if fld.endswith("-") else float(fld)
+    ...
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Using a callable as the converter can be particularly useful for handling
+    values with different formatting, e.g. floats with underscores:
+
+    >>> s = StringIO("1 2.7 100_000")
+    >>> np.loadtxt(s, converters=float)
+    array([1.e+00, 2.7e+00, 1.e+05])
+
+    This idea can be extended to automatically handle values specified in
+    many different formats, such as hex values:
+
+    >>> def conv(val):
+    ...     try:
+    ...         return float(val)
+    ...     except ValueError:
+    ...         return float.fromhex(val)
+    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+    Or a format where the ``-`` sign comes after the number:
+
+    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+    >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Support for quoted fields is enabled with the `quotechar` parameter.
+    Comment and delimiter characters are ignored when they appear within a
+    quoted item delineated by `quotechar`:
+
+    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Quoted fields can be separated by multiple whitespace characters:
+
+    >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Two consecutive quote characters within a quoted field are treated as a
+    single escaped character:
+
+    >>> s = StringIO('"Hello, my name is ""Monty""!"')
+    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+    array('Hello, my name is "Monty"!', dtype='<U26')
+
+    Read subset of columns when all rows do not contain equal number of
+    values:
+
+    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+    >>> np.loadtxt(d, usecols=(0, 1))
+    array([[ 1.,  2.],
+           [ 2.,  4.],
+           [ 3.,  9.],
+           [ 4., 16.]])
+
+    """
+
+    if like is not None:
+        return _loadtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            converters=converters, skiprows=skiprows, usecols=usecols,
+            unpack=unpack, ndmin=ndmin, encoding=encoding,
+            max_rows=max_rows
+        )
+
+    if isinstance(delimiter, bytes):
+        delimiter = delimiter.decode("latin1")
+
+    if dtype is None:
+        dtype = np.float64
+
+    comment = comments
+    # Control character type conversions for Py3 convenience
+    if comment is not None:
+        if isinstance(comment, (str, bytes)):
+            comment = [comment]
+        comment = [
+            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+    if isinstance(delimiter, bytes):
+        delimiter = delimiter.decode('latin1')
+
+    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                converters=converters, skiplines=skiprows, usecols=usecols,
+                unpack=unpack, ndmin=ndmin, encoding=encoding,
+                max_rows=max_rows, quote=quotechar)
+
+    return arr
+
+
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                        header=None, footer=None, comments=None,
+                        encoding=None):
+    return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+            footer='', comments='# ', encoding=None):
+    """
+    Save an array to a text file.
+
+    Parameters
+    ----------
+    fname : filename, file handle or pathlib.Path
+        If the filename ends in ``.gz``, the file is automatically saved in
+        compressed gzip format. `loadtxt` understands gzipped files
+        transparently.
+    X : 1D or 2D array_like
+        Data to be saved to a text file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), a sequence of formats, or a
+        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+        case `delimiter` is ignored. For complex `X`, the legal options
+        for `fmt` are:
+
+        * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
+          like ``' (%s+%sj)' % (fmt, fmt)``
+        * a full string specifying every real and imaginary part, e.g.
+          ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
+        * a list of specifiers, one per column - in this case, the real
+          and imaginary part must have separate specifiers,
+          e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
+    delimiter : str, optional
+        String or character separating columns.
+    newline : str, optional
+        String or character separating lines.
+
+        .. versionadded:: 1.5.0
+    header : str, optional
+        String that will be written at the beginning of the file.
+
+        .. versionadded:: 1.7.0
+    footer : str, optional
+        String that will be written at the end of the file.
+
+        .. versionadded:: 1.7.0
+    comments : str, optional
+        String that will be prepended to the ``header`` and ``footer`` strings,
+        to mark them as comments. Default: '# ', as expected by e.g.
+        ``numpy.loadtxt``.
+
+        .. versionadded:: 1.7.0
+    encoding : {None, str}, optional
+        Encoding used to encode the outputfile. Does not apply to output
+        streams. If the encoding is something other than 'bytes' or 'latin1'
+        you will not be able to load the file in NumPy versions < 1.14. Default
+        is 'latin1'.
+
+        .. versionadded:: 1.14.0
+
+
+    See Also
+    --------
+    save : Save an array to a binary file in NumPy ``.npy`` format
+    savez : Save several arrays into an uncompressed ``.npz`` archive
+    savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+    Notes
+    -----
+    Further explanation of the `fmt` parameter
+    (``%[flag]width[.precision]specifier``):
+
+    flags:
+        ``-`` : left justify
+
+        ``+`` : Forces to precede result with + or -.
+
+        ``0`` : Left pad the number with zeros instead of space (see width).
+
+    width:
+        Minimum number of characters to be printed. The value is not truncated
+        if it has more characters.
+
+    precision:
+        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+          digits.
+        - For ``e, E`` and ``f`` specifiers, the number of digits to print
+          after the decimal point.
+        - For ``g`` and ``G``, the maximum number of significant digits.
+        - For ``s``, the maximum number of characters.
+
+    specifiers:
+        ``c`` : character
+
+        ``d`` or ``i`` : signed decimal integer
+
+        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+        ``f`` : decimal floating point
+
+        ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+        ``o`` : signed octal
+
+        ``s`` : string of characters
+
+        ``u`` : unsigned decimal integer
+
+        ``x,X`` : unsigned hexadecimal integer
+
+    This explanation of ``fmt`` is not complete, for an exhaustive
+    specification see [1]_.
+
+    References
+    ----------
+    .. [1] `Format Specification Mini-Language
+           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+           Python Documentation.
+
+    Examples
+    --------
+    >>> x = y = z = np.arange(0.0,5.0,1.0)
+    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
+    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+    """
+
+    class WriteWrap:
+        """Convert to bytes on bytestream inputs.
+
+        """
+        def __init__(self, fh, encoding):
+            self.fh = fh
+            self.encoding = encoding
+            self.do_write = self.first_write
+
+        def close(self):
+            self.fh.close()
+
+        def write(self, v):
+            self.do_write(v)
+
+        def write_bytes(self, v):
+            if isinstance(v, bytes):
+                self.fh.write(v)
+            else:
+                self.fh.write(v.encode(self.encoding))
+
+        def write_normal(self, v):
+            self.fh.write(asunicode(v))
+
+        def first_write(self, v):
+            try:
+                self.write_normal(v)
+                self.write = self.write_normal
+            except TypeError:
+                # input is probably a bytestream
+                self.write_bytes(v)
+                self.write = self.write_bytes
+
+    own_fh = False
+    if isinstance(fname, os.PathLike):
+        fname = os.fspath(fname)
+    if _is_string_like(fname):
+        # datasource doesn't support creating a new file ...
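+        # The touch-then-reopen below (see the comment above) exists
+        # because the datasource layer can open, and transparently
+        # compress, existing files but cannot create new ones itself.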
+ open(fname, 'wt').close() + fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) + own_fh = True + elif hasattr(fname, 'write'): + # wrap to handle byte output streams + fh = WriteWrap(fname, encoding or 'latin1') + else: + raise ValueError('fname must be a string or file handle') + + try: + X = np.asarray(X) + + # Handle 1-dimensional arrays + if X.ndim == 0 or X.ndim > 2: + raise ValueError( + "Expected 1D or 2D array, got %dD array instead" % X.ndim) + elif X.ndim == 1: + # Common case -- 1d array of numbers + if X.dtype.names is None: + X = np.atleast_2d(X).T + ncol = 1 + + # Complex dtype -- each field indicates a separate column + else: + ncol = len(X.dtype.names) + else: + ncol = X.shape[1] + + iscomplex_X = np.iscomplexobj(X) + # `fmt` can be a string with multiple insertion points or a + # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') + if type(fmt) in (list, tuple): + if len(fmt) != ncol: + raise AttributeError('fmt has wrong shape. %s' % str(fmt)) + format = delimiter.join(fmt) + elif isinstance(fmt, str): + n_fmt_chars = fmt.count('%') + error = ValueError('fmt has wrong number of %% formats: %s' % fmt) + if n_fmt_chars == 1: + if iscomplex_X: + fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol + else: + fmt = [fmt, ] * ncol + format = delimiter.join(fmt) + elif iscomplex_X and n_fmt_chars != (2 * ncol): + raise error + elif ((not iscomplex_X) and n_fmt_chars != ncol): + raise error + else: + format = fmt + else: + raise ValueError('invalid fmt: %r' % (fmt,)) + + if len(header) > 0: + header = header.replace('\n', '\n' + comments) + fh.write(comments + header + newline) + if iscomplex_X: + for row in X: + row2 = [] + for number in row: + row2.append(number.real) + row2.append(number.imag) + s = format % tuple(row2) + newline + fh.write(s.replace('+-', '-')) + else: + for row in X: + try: + v = format % tuple(row) + newline + except TypeError as e: + raise TypeError("Mismatch between array dtype ('%s') and " + "format specifier ('%s')" + % (str(X.dtype), format)) from e + fh.write(v) + + if len(footer) > 0: + footer = footer.replace('\n', '\n' + comments) + fh.write(comments + footer + newline) + finally: + if own_fh: + fh.close() + + +@set_module('numpy') +def fromregex(file, regexp, dtype, encoding=None): + r""" + Construct an array from a text file, using regular expression parsing. + + The returned array is always a structured array, and is constructed from + all matches of the regular expression in the file. Groups in the regular + expression are converted to fields of the structured array. + + Parameters + ---------- + file : file, str, or pathlib.Path + Filename or file object to read. + + .. versionchanged:: 1.22.0 + Now accepts `os.PathLike` implementations. + regexp : str or regexp + Regular expression used to parse the file. + Groups in the regular expression correspond to fields in the dtype. + dtype : dtype or list of dtypes + Dtype for the structured array; must be a structured datatype. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + + .. versionadded:: 1.14.0 + + Returns + ------- + output : ndarray + The output array, containing the part of the content of `file` that + was matched by `regexp`. `output` is always a structured array. + + Raises + ------ + TypeError + When `dtype` is not a valid dtype for a structured array. 
+
+    See Also
+    --------
+    fromstring, loadtxt
+
+    Notes
+    -----
+    Dtypes for structured arrays can be specified in several forms, but all
+    forms specify at least the data type and field name. For details see
+    `basics.rec`.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
+
+    >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
+    >>> output = np.fromregex(text, regexp,
+    ...                       [('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+          dtype=[('num', '<i8'), ('key', 'S3')])
+    >>> output['num']
+    array([1312, 1534,  444])
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = os.fspath(file)
+        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+        own_fh = True
+
+    try:
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+        if dtype.names is None:
+            raise TypeError('dtype must be a structured datatype.')
+
+        content = file.read()
+        if isinstance(content, bytes) and isinstance(regexp, str):
+            regexp = asbytes(regexp)
+
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(regexp)
+        seq = regexp.findall(content)
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            # re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skip_header=0, skip_footer=0, converters=None,
+               missing_values=None, filling_values=None, usecols=None,
+               names=None, excludelist=None,
+               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
+               replace_space='_', autostrip=False, case_sensitive=True,
+               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+               invalid_raise=True, max_rows=None, encoding=None,
+               *, ndmin=0, like=None):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read. If the filename
+        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+        that generators must return bytes or strings. The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : dtype, optional
+        Data type of the resulting array.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : str, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded.
+    delimiter : str, int, or sequence, optional
+        The string used to separate values. By default, any consecutive
+        whitespaces act as delimiter. An integer or sequence of integers
+        can also be provided as width(s) of each field.
+    skiprows : int, optional
+        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
+ skip_header : int, optional + The number of lines to skip at the beginning of the file. + skip_footer : int, optional + The number of lines to skip at the end of the file. + converters : variable, optional + The set of functions that convert the data of a column to a value. + The converters can also be used to provide a default value + for missing data: ``converters = {3: lambda s: float(s or 0)}``. + missing : variable, optional + `missing` was removed in numpy 1.10. Please use `missing_values` + instead. + missing_values : variable, optional + The set of strings corresponding to missing data. + filling_values : variable, optional + The set of values to be used as default when the data are missing. + usecols : sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, str, sequence}, optional + If `names` is True, the field names are read from the first line after + the first `skip_header` lines. This line can optionally be preceded + by a comment delimiter. Any content before the comment delimiter is + discarded. If `names` is a sequence or a single-string of + comma-separated names, the names will be used to define the field + names in a structured dtype. If `names` is None, the names of the + dtype fields will be used, if any. + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended with an + underscore: for example, `file` would become `file_`. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + defaultfmt : str, optional + A format used to define default field names, such as "f%i" or "f_%02i". + autostrip : bool, optional + Whether to automatically strip white spaces from the variables. + replace_space : char, optional + Character(s) used in replacement of white spaces in the variable + names. By default, use a '_'. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = genfromtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + usemask : bool, optional + If True, return a masked array. + If False, return a regular array. + loose : bool, optional + If True, do not raise errors for invalid values. + invalid_raise : bool, optional + If True, an exception is raised if an inconsistency is detected in the + number of columns. + If False, a warning is emitted and the offending lines are skipped. + max_rows : int, optional + The maximum number of rows to read. Must not be used with skip_footer + at the same time. If given, the value must be at least 1. Default is + to read the entire file. + + .. versionadded:: 1.10.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply when `fname` + is a file object. The special value 'bytes' enables backward + compatibility workarounds that ensure that you receive byte arrays + when possible and passes latin1 encoded strings to converters. + Override this value to receive unicode arrays and pass strings + as input to converters. If set to None the system default is used. + The default value is 'bytes'. 
+
+        .. versionadded:: 1.14.0
+        .. versionchanged:: 2.0
+            Before NumPy 2, the default was ``'bytes'`` for Python 2
+            compatibility. The default is now ``None``.
+
+    ndmin : int, optional
+        Same parameter as `loadtxt`
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When variables are named (either by a flexible dtype or with a `names`
+      sequence), there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+    * Custom converters may receive unexpected values due to dtype
+      discovery.
+
+    References
+    ----------
+    .. [1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO("1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ... ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Using dtype = None
+
+    >>> _ = s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ... names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '<U5')])
+
+    Specifying dtype and names
+
+    >>> _ = s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ... names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO("11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '<U5')])
+
+    An example to show comments
+
+    >>> f = StringIO('''
+    ... text,# of chars
+    ... hello world,11
+    ...
numpy,5''') + >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') + array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], + dtype=[('f0', 'S12'), ('f1', 'S12')]) + + """ + + if like is not None: + return _genfromtxt_with_like( + like, fname, dtype=dtype, comments=comments, delimiter=delimiter, + skip_header=skip_header, skip_footer=skip_footer, + converters=converters, missing_values=missing_values, + filling_values=filling_values, usecols=usecols, names=names, + excludelist=excludelist, deletechars=deletechars, + replace_space=replace_space, autostrip=autostrip, + case_sensitive=case_sensitive, defaultfmt=defaultfmt, + unpack=unpack, usemask=usemask, loose=loose, + invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, + ndmin=ndmin, + ) + + _ensure_ndmin_ndarray_check_param(ndmin) + + if max_rows is not None: + if skip_footer: + raise ValueError( + "The keywords 'skip_footer' and 'max_rows' can not be " + "specified at the same time.") + if max_rows < 1: + raise ValueError("'max_rows' must be at least 1.") + + if usemask: + from numpy.ma import MaskedArray, make_mask_descr + # Check the input dictionary of converters + user_converters = converters or {} + if not isinstance(user_converters, dict): + raise TypeError( + "The input argument 'converter' should be a valid dictionary " + "(got '%s' instead)" % type(user_converters)) + + if encoding == 'bytes': + encoding = None + byte_converters = True + else: + byte_converters = False + + # Initialize the filehandle, the LineSplitter and the NameValidator + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if isinstance(fname, str): + fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fid_ctx = contextlib.closing(fid) + else: + fid = fname + fid_ctx = contextlib.nullcontext(fid) + try: + fhd = iter(fid) + except TypeError as e: + raise TypeError( + "fname must be a string, a filehandle, a sequence of strings,\n" + f"or an iterator of strings. Got {type(fname)} instead." + ) from e + with fid_ctx: + split_line = LineSplitter(delimiter=delimiter, comments=comments, + autostrip=autostrip, encoding=encoding) + validate_names = NameValidator(excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + + # Skip the first `skip_header` rows + try: + for i in range(skip_header): + next(fhd) + + # Keep on until we find the first valid values + first_values = None + + while not first_values: + first_line = _decode_line(next(fhd), encoding) + if (names is True) and (comments is not None): + if comments in first_line: + first_line = ( + ''.join(first_line.split(comments)[1:])) + first_values = split_line(first_line) + except StopIteration: + # return an empty array if the datafile is empty + first_line = '' + first_values = [] + warnings.warn( + 'genfromtxt: Empty input file: "%s"' % fname, stacklevel=2 + ) + + # Should we take the first values as names ? 
+ if names is True: + fval = first_values[0].strip() + if comments is not None: + if fval in comments: + del first_values[0] + + # Check the columns to use: make sure `usecols` is a list + if usecols is not None: + try: + usecols = [_.strip() for _ in usecols.split(",")] + except AttributeError: + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols, ] + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if names is True: + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + # Get the dtype + if dtype is not None: + dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, + excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + # Make sure the names is a list (for 2.5) + if names is not None: + names = list(names) + + if usecols: + for (i, current) in enumerate(usecols): + # if usecols is a list of names, convert to a list of indices + if _is_string_like(current): + usecols[i] = names.index(current) + elif current < 0: + usecols[i] = current + len(first_values) + # If the dtype is not None, make sure we update it + if (dtype is not None) and (len(dtype) > nbcols): + descr = dtype.descr + dtype = np.dtype([descr[_] for _ in usecols]) + names = list(dtype.names) + # If `names` is not None, update the names + elif (names is not None) and (len(names) > nbcols): + names = [names[_] for _ in usecols] + elif (names is not None) and (dtype is not None): + names = list(dtype.names) + + # Process the missing values ............................... + # Rename missing_values for convenience + user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') + + # Define the list of missing_values (one column: one list) + missing_values = [list(['']) for _ in range(nbcols)] + + # We have a dictionary: process it field by field + if isinstance(user_missing_values, dict): + # Loop on the items + for (key, val) in user_missing_values.items(): + # Is the key a string ? + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key as needed if it's a column number + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Transform the value as a list of string + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val), ] + # Add the value(s) to the current list of missing + if key is None: + # None acts as default + for miss in missing_values: + miss.extend(val) + else: + missing_values[key].extend(val) + # We have a sequence : each item matches a column + elif isinstance(user_missing_values, (list, tuple)): + for (value, entry) in zip(user_missing_values, missing_values): + value = str(value) + if value not in entry: + entry.append(value) + # We have a string : apply it to all entries + elif isinstance(user_missing_values, str): + user_value = user_missing_values.split(",") + for entry in missing_values: + entry.extend(user_value) + # We have something else: apply it to all entries + else: + for entry in missing_values: + entry.extend([str(user_missing_values)]) + + # Process the filling_values ............................... 
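+        # (filling_values supplies the per-column defaults that are
+        # substituted when a value is recognised as missing)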
+ # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key if it's a column number + # and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times + # the same converter, instead of 3 different converters. + converters = [ + StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values) + ] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, + locked=True, + missing_values=miss, + default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, + locked=True, + missing_values=miss, + default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, + # use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing + # to the user converter. + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... 
masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([v.strip() in m + for (v, m) in zip(values, + missing_values)])) + if len(rows) == max_rows: + break + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = "Converter #%i is locked and cannot be upgraded: " % i + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + errmsg += "(occurred line #%i for value '%s')" + errmsg %= (j + 1 + skip_header, value) + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = " Line #%%i (got %%i columns instead of %i)" % nbcols + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.str_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. 
Set the encoding, use None for the " + "system default.", + np.exceptions.VisibleDeprecationWarning, stacklevel=2) + + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types[:] + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names is not None: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. 
+ else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names is not None: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + + output = _ensure_ndmin_ndarray(output, ndmin=ndmin) + + if unpack: + if names is None: + return output.T + elif len(names) == 1: + # squeeze single-name dtypes too + return output[names[0]] + else: + # For structured arrays with multiple fields, + # return an array for each field. + return [output[field] for field in names] + return output + + +_genfromtxt_with_like = array_function_dispatch()(genfromtxt) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromtxt` is deprecated, " + "use `numpy.genfromtxt` instead." + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` with comma as `delimiter` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromcsv` is deprecated, " + "use `numpy.genfromtxt` with comma as `delimiter` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Set default kwargs for genfromtxt as relevant to csv import. 
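+    # (lower-cased field names, header row read as names, comma delimiter,
+    # and per-column dtype inference)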
+ kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/phivenv/Lib/site-packages/numpy/lib/_npyio_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_npyio_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5ad4ee4f7f3232666ca091904ec0633d095c02f4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_npyio_impl.pyi @@ -0,0 +1,348 @@ +import os +import sys +import zipfile +import types +from re import Pattern +from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable +from typing import ( + Literal as L, + Any, + TypeVar, + Generic, + IO, + overload, + Protocol, +) + +from numpy import ( + ndarray, + recarray, + dtype, + generic, + float64, + void, + record, +) + +from numpy.ma.mrecords import MaskedRecords +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _DTypeLike, + _SupportsArrayFunc, +) + +from numpy._core.multiarray import ( + packbits as packbits, + unpackbits as unpackbits, +) + +_T = TypeVar("_T") +_T_contra = TypeVar("_T_contra", contravariant=True) +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True) +_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True) + +class _SupportsGetItem(Protocol[_T_contra, _T_co]): + def __getitem__(self, key: _T_contra, /) -> _T_co: ... + +class _SupportsRead(Protocol[_CharType_co]): + def read(self) -> _CharType_co: ... + +class _SupportsReadSeek(Protocol[_CharType_co]): + def read(self, n: int, /) -> _CharType_co: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +class _SupportsWrite(Protocol[_CharType_contra]): + def write(self, s: _CharType_contra, /) -> object: ... + +__all__: list[str] + +class BagObj(Generic[_T_co]): + def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str) -> _T_co: ... + def __dir__(self) -> list[str]: ... + +class NpzFile(Mapping[str, NDArray[Any]]): + zip: zipfile.ZipFile + fid: None | IO[str] + files: list[str] + allow_pickle: bool + pickle_kwargs: None | Mapping[str, Any] + _MAX_REPR_ARRAY_COUNT: int + # Represent `f` as a mutable property so we can access the type of `self` + @property + def f(self: _T) -> BagObj[_T]: ... + @f.setter + def f(self: _T, value: BagObj[_T]) -> None: ... + def __init__( + self, + fid: IO[str], + own_fid: bool = ..., + allow_pickle: bool = ..., + pickle_kwargs: None | Mapping[str, Any] = ..., + ) -> None: ... + def __enter__(self: _T) -> _T: ... + def __exit__( + self, + exc_type: None | type[BaseException], + exc_value: None | BaseException, + traceback: None | types.TracebackType, + /, + ) -> None: ... + def close(self) -> None: ... + def __del__(self) -> None: ... + def __iter__(self) -> Iterator[str]: ... + def __len__(self) -> int: ... + def __getitem__(self, key: str) -> NDArray[Any]: ... + def __contains__(self, key: str) -> bool: ... + def __repr__(self) -> str: ... + +class DataSource: + def __init__( + self, + destpath: None | str | os.PathLike[str] = ..., + ) -> None: ... + def __del__(self) -> None: ... + def abspath(self, path: str) -> str: ... 
+ def exists(self, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open( + self, + path: str, + mode: str = ..., + encoding: None | str = ..., + newline: None | str = ..., + ) -> IO[Any]: ... + +# NOTE: Returns a `NpzFile` if file is a zip file; +# returns an `ndarray`/`memmap` otherwise +def load( + file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes], + mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., + allow_pickle: bool = ..., + fix_imports: bool = ..., + encoding: L["ASCII", "latin1", "bytes"] = ..., +) -> Any: ... + +def save( + file: str | os.PathLike[str] | _SupportsWrite[bytes], + arr: ArrayLike, + allow_pickle: bool = ..., + fix_imports: bool = ..., +) -> None: ... + +def savez( + file: str | os.PathLike[str] | _SupportsWrite[bytes], + *args: ArrayLike, + **kwds: ArrayLike, +) -> None: ... + +def savez_compressed( + file: str | os.PathLike[str] | _SupportsWrite[bytes], + *args: ArrayLike, + **kwds: ArrayLike, +) -> None: ... + +# File-like objects only have to implement `__iter__` and, +# optionally, `encoding` +@overload +def loadtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: None = ..., + comments: None | str | Sequence[str] = ..., + delimiter: None | str = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + skiprows: int = ..., + usecols: int | Sequence[int] = ..., + unpack: bool = ..., + ndmin: L[0, 1, 2] = ..., + encoding: None | str = ..., + max_rows: None | int = ..., + *, + quotechar: None | str = ..., + like: None | _SupportsArrayFunc = ... +) -> NDArray[float64]: ... +@overload +def loadtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: _DTypeLike[_SCT], + comments: None | str | Sequence[str] = ..., + delimiter: None | str = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + skiprows: int = ..., + usecols: int | Sequence[int] = ..., + unpack: bool = ..., + ndmin: L[0, 1, 2] = ..., + encoding: None | str = ..., + max_rows: None | int = ..., + *, + quotechar: None | str = ..., + like: None | _SupportsArrayFunc = ... +) -> NDArray[_SCT]: ... +@overload +def loadtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: DTypeLike, + comments: None | str | Sequence[str] = ..., + delimiter: None | str = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + skiprows: int = ..., + usecols: int | Sequence[int] = ..., + unpack: bool = ..., + ndmin: L[0, 1, 2] = ..., + encoding: None | str = ..., + max_rows: None | int = ..., + *, + quotechar: None | str = ..., + like: None | _SupportsArrayFunc = ... +) -> NDArray[Any]: ... + +def savetxt( + fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes], + X: ArrayLike, + fmt: str | Sequence[str] = ..., + delimiter: str = ..., + newline: str = ..., + header: str = ..., + footer: str = ..., + comments: str = ..., + encoding: None | str = ..., +) -> None: ... + +@overload +def fromregex( + file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + regexp: str | bytes | Pattern[Any], + dtype: _DTypeLike[_SCT], + encoding: None | str = ... +) -> NDArray[_SCT]: ... +@overload +def fromregex( + file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + regexp: str | bytes | Pattern[Any], + dtype: DTypeLike, + encoding: None | str = ... +) -> NDArray[Any]: ... 
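+# Note that `genfromtxt` with ``dtype=None`` infers column types at runtime,
+# so its first overload below returns ``NDArray[Any]`` rather than the
+# ``NDArray[float64]`` that `loadtxt` promises for the default dtype.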
+ +@overload +def genfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: None = ..., + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def genfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: _DTypeLike[_SCT], + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def genfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: DTypeLike, + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def recfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[False] = ..., + **kwargs: Any, +) -> recarray[Any, dtype[record]]: ... +@overload +def recfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[True], + **kwargs: Any, +) -> MaskedRecords[Any, dtype[void]]: ... + +@overload +def recfromcsv( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[False] = ..., + **kwargs: Any, +) -> recarray[Any, dtype[record]]: ... +@overload +def recfromcsv( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[True], + **kwargs: Any, +) -> MaskedRecords[Any, dtype[void]]: ... 
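+# Illustrative sketch, not part of the stub file: how the overloads above
+# resolve for a static type checker such as mypy (the file name is
+# hypothetical).
+#
+#     import numpy as np
+#     a = np.loadtxt("data.txt")                    # NDArray[float64]
+#     b = np.loadtxt("data.txt", dtype=np.int32)    # NDArray[int32]
+#     c = np.genfromtxt("data.txt", dtype="i8,f8")  # NDArray[Any]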
diff --git a/phivenv/Lib/site-packages/numpy/lib/_polynomial_impl.py b/phivenv/Lib/site-packages/numpy/lib/_polynomial_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..161e51953afad466c5de078c06b63a5221647c19 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_polynomial_impl.py @@ -0,0 +1,1442 @@ +""" +Functions to operate on polynomials. + +""" +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit'] + +import functools +import re +import warnings + +from .._utils import set_module +import numpy._core.numeric as NX + +from numpy._core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, + ones) +from numpy._core import overrides +from numpy.exceptions import RankWarning +from numpy.lib._twodim_base_impl import diag, vander +from numpy.lib._function_base_impl import trim_zeros +from numpy.lib._type_check_impl import iscomplex, real, imag, mintypecode +from numpy.linalg import eigvals, lstsq, inv + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. 
+ + Examples + -------- + Given a sequence of a polynomial's zeros: + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1., 0., 0., 0.]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. ]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for zero in seq_of_zeros: + a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. + + Examples + -------- + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. 
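+    # (these zero roots are appended back explicitly after the
+    # companion-matrix eigenvalue computation below)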
+ trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1])+1] + + # casting: if incoming array isn't floating point, make it floating point. + if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N-2,), p.dtype), -1) + A[0,:] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + The defining property of the antiderivative: + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. 
Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0]*NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + + +def _polyder_dispatcher(p, m=None): + return (p,) + + +@array_function_dispatch(_polyder_dispatcher) +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([0]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + + +def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): + return (x, y, w) + + +@array_function_dispatch(_polyfit_dispatcher) +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit ` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. 
+ y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed + to be unreliable except in a relative sense and everything is scaled + such that the reduced chi2 is unity. This scaling is omitted if + ``cov='unscaled'``, as is relevant for the case that the weights are + w = 1/sigma, with sigma known to be a reliable estimate of the + uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the effective rank of the scaled Vandermonde + coefficient matrix + - singular_values -- singular values of the scaled Vandermonde + coefficient matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K) + Present only if ``full == False`` and ``cov == True``. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math:: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. 
+ + `polyfit` issues a `~exceptions.RankWarning` when the least-squares fit is + badly conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. [2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> import warnings + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 # may vary + >>> p(3.5) + -0.34732142857143039 # may vary + >>> p(10) + 22.579365079365115 # may vary + + High-order polynomials may oscillate wildly: + + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', np.exceptions.RankWarning) + ... p30 = np.poly1d(np.polyfit(x, y, 30)) + ... + >>> p30(4) + -0.80000000000000204 # may vary + >>> p30(5) + -0.99999999999999445 # may vary + >>> p30(4.5) + -0.10547061179440398 # may vary + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. 
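+    # (deg must be non-negative, x must be a non-empty 1-D array, and y must
+    # be 1-D or 2-D with the same first dimension as x)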
+    if deg < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if x.shape[0] != y.shape[0]:
+        raise TypeError("expected x and y to have same length")
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x)*finfo(x.dtype).eps
+
+    # set up least squares equation for powers of x
+    lhs = vander(x, order)
+    rhs = y
+
+    # apply weighting
+    if w is not None:
+        w = NX.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected a 1-d array for weights")
+        if w.shape[0] != y.shape[0]:
+            raise TypeError("expected w and y to have the same length")
+        lhs *= w[:, NX.newaxis]
+        if rhs.ndim == 2:
+            rhs *= w[:, NX.newaxis]
+        else:
+            rhs *= w
+
+    # scale lhs to improve condition number and solve
+    scale = NX.sqrt((lhs*lhs).sum(axis=0))
+    lhs /= scale
+    c, resids, rank, s = lstsq(lhs, rhs, rcond)
+    c = (c.T/scale).T  # broadcast scale coefficients
+
+    # warn on rank reduction, which indicates an ill conditioned matrix
+    if rank != order and not full:
+        msg = "Polyfit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=2)
+
+    if full:
+        return c, resids, rank, s, rcond
+    elif cov:
+        Vbase = inv(dot(lhs.T, lhs))
+        Vbase /= NX.outer(scale, scale)
+        if cov == "unscaled":
+            fac = 1
+        else:
+            if len(x) <= order:
+                raise ValueError("the number of data points must exceed order "
+                                 "to scale the covariance matrix")
+            # note, this used to be: fac = resids / (len(x) - order - 2.0)
+            # it was decided that the "- 2" (originally justified by "Bayesian
+            # uncertainty analysis") is not what the user expects
+            # (see gh-11196 and gh-11197)
+            fac = resids / (len(x) - order)
+        if y.ndim == 1:
+            return c, Vbase * fac
+        else:
+            return c, Vbase[:,:, NX.newaxis] * fac
+    else:
+        return c
+
+
+def _polyval_dispatcher(p, x):
+    return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
+def polyval(p, x):
+    """
+    Evaluate a polynomial at specific values.
+
+    .. note::
+       This forms part of the old polynomial API. Since version 1.4, the
+       new polynomial API defined in `numpy.polynomial` is preferred.
+       A summary of the differences can be found in the
+       :doc:`transition guide </reference/routines.polynomials>`.
+
+    If `p` is of length N, this function returns the value::
+
+        p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
+
+    If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
+    If `x` is another polynomial then the composite polynomial ``p(x(t))``
+    is returned.
+
+    Parameters
+    ----------
+    p : array_like or poly1d object
+        1D array of polynomial coefficients (including coefficients equal
+        to zero) from highest degree to the constant term, or an
+        instance of poly1d.
+    x : array_like or poly1d object
+        A number, an array of numbers, or an instance of poly1d, at
+        which to evaluate `p`.
+
+    Returns
+    -------
+    values : ndarray or poly1d
+        If `x` is a poly1d instance, the result is the composition of the two
+        polynomials, i.e., `x` is "substituted" in `p` and the simplified
+        result is returned. In addition, the type of `x` - array_like or
+        poly1d - governs the type of the output: `x` array_like => `values`
+        array_like, `x` a poly1d object => `values` is also.
+
+    See Also
+    --------
+    poly1d: A polynomial class.
+
+    Notes
+    -----
+    Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
+    for polynomials of high degree the values may be inaccurate due to
+    rounding errors. Use carefully.
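+
+    For instance, evaluating ``3*x**2 + 0*x + 1`` at ``x = 5`` proceeds as
+    ``(3*5 + 0)*5 + 1 = 76``, one multiplication and one addition per
+    coefficient.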
+ + If `x` is a subtype of `ndarray` the return value will be of the same type. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([76]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([76]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asanyarray(x) + y = NX.zeros_like(x) + for pv in p: + y = y * x + pv + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + + >>> np.polysub([2, 10, -2], [3, 10, -4]) + array([-1, 0, 2]) + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 - a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) - a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 - NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polymul(a1, a2): + """ + Find the product of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Finds the polynomial resulting from the multiplication of the two input + polynomials. Each input must be either a poly1d object or a 1D sequence + of polynomial coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The polynomial resulting from the multiplication of the inputs. If + either inputs is a poly1d object, then the output is also a poly1d + object. Otherwise, it is a 1D array of polynomial coefficients from + highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + convolve : Array convolution. Same output as polymul, but has parameter + for overlap mode. + + Examples + -------- + >>> np.polymul([1, 2, 3], [9, 5, 1]) + array([ 9, 23, 38, 17, 3]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2, 3]) + >>> p2 = np.poly1d([9, 5, 1]) + >>> print(p1) + 2 + 1 x + 2 x + 3 + >>> print(p2) + 2 + 9 x + 5 x + 1 + >>> print(np.polymul(p1, p2)) + 4 3 2 + 9 x + 23 x + 38 x + 17 x + 3 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1, a2 = poly1d(a1), poly1d(a2) + val = NX.convolve(a1, a2) + if truepoly: + val = poly1d(val) + return val + + +def _polydiv_dispatcher(u, v): + return (u, v) + + +@array_function_dispatch(_polydiv_dispatcher) +def polydiv(u, v): + """ + Returns the quotient and remainder of polynomial division. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The input arrays are the coefficients (including any coefficients + equal to zero) of the "numerator" (dividend) and "denominator" + (divisor) polynomials, respectively. + + Parameters + ---------- + u : array_like or poly1d + Dividend polynomial's coefficients. + + v : array_like or poly1d + Divisor polynomial's coefficients. + + Returns + ------- + q : ndarray + Coefficients, including those equal to zero, of the quotient. + r : ndarray + Coefficients, including those equal to zero, of the remainder. + + See Also + -------- + poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub + polyval + + Notes + ----- + Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need + not equal `v.ndim`. In other words, all four possible combinations - + ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, + ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. + + Examples + -------- + .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([1.5 , 1.75]), array([0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(0, m-n+1): + d = scale * r[k] + q[k] = d + r[k:k+n+1] -= d*v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + +_poly_mat = re.compile(r"\*\*([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' '*(len(power)-1) + toadd1 = ' '*(len(partstr)-1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' '*(len(power)-1) + line1 += ' '*(len(partstr)-1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d: + """ + A one-dimensional polynomial class. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). 
+
+    Examples
+    --------
+    Construct the polynomial :math:`x^2 + 2x + 3`:
+
+    >>> p = np.poly1d([1, 2, 3])
+    >>> print(np.poly1d(p))
+       2
+    1 x + 2 x + 3
+
+    Evaluate the polynomial at :math:`x = 0.5`:
+
+    >>> p(0.5)
+    4.25
+
+    Find the roots:
+
+    >>> p.r
+    array([-1.+1.41421356j, -1.-1.41421356j])
+    >>> p(p.r)
+    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j])  # may vary
+
+    These numbers in the previous line represent (0, 0) to machine precision.
+
+    Show the coefficients:
+
+    >>> p.c
+    array([1, 2, 3])
+
+    Display the order (the leading zero-coefficients are removed):
+
+    >>> p.order
+    2
+
+    Show the coefficient of the k-th power in the polynomial
+    (which is equivalent to ``p.c[-(k+1)]``):
+
+    >>> p[1]
+    2
+
+    Polynomials can be added, subtracted, multiplied, and divided
+    (returns quotient and remainder):
+
+    >>> p * p
+    poly1d([ 1,  4, 10, 12,  9])
+
+    >>> (p**3 + 4) / p
+    (poly1d([ 1.,  4., 10., 12.,  9.]), poly1d([4.]))
+
+    ``asarray(p)`` gives the coefficient array, so polynomials can be
+    used in all functions that accept arrays:
+
+    >>> p**2 # square of polynomial
+    poly1d([ 1,  4, 10, 12,  9])
+
+    >>> np.square(p) # square of individual coefficients
+    array([1, 4, 9])
+
+    The variable used in the string representation of `p` can be modified,
+    using the `variable` parameter:
+
+    >>> p = np.poly1d([1,2,3], variable='z')
+    >>> print(p)
+       2
+    1 z + 2 z + 3
+
+    Construct a polynomial from its roots:
+
+    >>> np.poly1d([1, 2], True)
+    poly1d([ 1., -3.,  2.])
+
+    This is the same polynomial as obtained by:
+
+    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
+    poly1d([ 1, -3,  2])
+
+    """
+    __hash__ = None
+
+    @property
+    def coeffs(self):
+        """ The polynomial coefficients """
+        return self._coeffs
+
+    @coeffs.setter
+    def coeffs(self, value):
+        # allowing this makes p.coeffs *= 2 legal
+        if value is not self._coeffs:
+            raise AttributeError("Cannot set attribute")
+
+    @property
+    def variable(self):
+        """ The name of the polynomial variable """
+        return self._variable
+
+    # calculated attributes
+    @property
+    def order(self):
+        """ The order or degree of the polynomial """
+        return len(self._coeffs) - 1
+
+    @property
+    def roots(self):
+        """ The roots of the polynomial, where self(x) == 0 """
+        return roots(self._coeffs)
+
+    # our internal _coeffs property needs to be backed by __dict__['coeffs']
+    # for scipy to work correctly.
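+    # (some downstream code accesses p.__dict__['coeffs'] directly, so the
+    # setter below stores the coefficient array under that public key)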
+ @property + def _coeffs(self): + return self.__dict__['coeffs'] + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0], dtype=c_or_r.dtype) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None, copy=None): + if t: + return NX.asarray(self.coeffs, t, copy=copy) + else: + return NX.asarray(self.coeffs, copy=copy) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return "poly1d(%s)" % vals + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs)-1 + + def fmt_float(q): + s = '%.4g' % q + if s.endswith('.0000'): + s = s[:-5] + return s + + for k, coeff in enumerate(coeffs): + if not iscomplex(coeff): + coefstr = fmt_float(real(coeff)) + elif real(coeff) == 0: + coefstr = '%sj' % fmt_float(imag(coeff)) + else: + coefstr = '(%s + %sj)' % (fmt_float(real(coeff)), + fmt_float(imag(coeff))) + + power = (N-k) + if power == 0: + if coefstr != '0': + newstr = '%s' % (coefstr,) + else: + if k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = '%s %s' % (coefstr, var) + else: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = "%s - %s" % (thestr, newstr[1:]) + else: + thestr = "%s + %s" % (thestr, newstr) + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + 
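+    # reflected subtraction: a scalar or coefficient sequence on the left is
+    # promoted to poly1d first, e.g. 1 - np.poly1d([1, 2]) gives poly1d([-1, -1])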
+ def __rsub__(self, other): + other = poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __div__(self, other): + if isscalar(other): + return poly1d(self.coeffs/other) + else: + other = poly1d(other) + return polydiv(self, other) + + __truediv__ = __div__ + + def __rdiv__(self, other): + if isscalar(other): + return poly1d(other/self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + __rtruediv__ = __rdiv__ + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return self.coeffs.dtype.type(0) + if val < 0: + return self.coeffs.dtype.type(0) + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key-self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + return + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + +warnings.simplefilter('always', RankWarning) diff --git a/phivenv/Lib/site-packages/numpy/lib/_polynomial_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_polynomial_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4513c125d1eb65bb70d1f793549d4e6cf10b9956 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_polynomial_impl.pyi @@ -0,0 +1,302 @@ +from typing import ( + Literal as L, + overload, + Any, + SupportsInt, + SupportsIndex, + TypeVar, + NoReturn, +) + +import numpy as np +from numpy import ( + poly1d as poly1d, + unsignedinteger, + signedinteger, + floating, + complexfloating, + int32, + int64, + float64, + complex128, + object_, +) + +from numpy._typing import ( + NDArray, + ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +_T = TypeVar("_T") + +_2Tup = tuple[_T, _T] +_5Tup = tuple[ + _T, + NDArray[float64], + NDArray[int32], + NDArray[float64], + NDArray[float64], +] + +__all__: list[str] + +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ... + +# Returns either a float or complex array depending on the input values. +# See `np.linalg.eigvals`. +def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ... + +@overload +def polyint( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., +) -> poly1d: ... +@overload +def polyint( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeFloat_co = ..., +) -> NDArray[floating[Any]]: ... 
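+# (the polyint overloads above and below are ordered most-specific first:
+# poly1d in -> poly1d out, then float, complex and object coefficient arrays)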
+@overload +def polyint( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyint( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeObject_co = ..., +) -> NDArray[object_]: ... + +@overload +def polyder( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., +) -> poly1d: ... +@overload +def polyder( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def polyder( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyder( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[object_]: ... + +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[False] = ..., +) -> NDArray[float64]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[False] = ..., +) -> NDArray[complex128]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[True] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[True] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... + +@overload +def polyval( + p: _ArrayLikeBool_co, + x: _ArrayLikeBool_co, +) -> NDArray[int64]: ... +@overload +def polyval( + p: _ArrayLikeUInt_co, + x: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polyval( + p: _ArrayLikeInt_co, + x: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polyval( + p: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polyval( + p: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyval( + p: _ArrayLikeObject_co, + x: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polyadd( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NDArray[np.bool]: ... +@overload +def polyadd( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... 
+@overload +def polyadd( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polysub( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NoReturn: ... +@overload +def polysub( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polysub( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +# NOTE: Not an alias, but they do have the same signature (that we can reuse) +polymul = polyadd + +@overload +def polydiv( + u: poly1d, + v: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co | _ArrayLikeObject_co, + v: poly1d, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, +) -> _2Tup[NDArray[floating[Any]]]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, +) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ... +@overload +def polydiv( + u: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, +) -> _2Tup[NDArray[Any]]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_scimath_impl.py b/phivenv/Lib/site-packages/numpy/lib/_scimath_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..9af2d6088ba7a819ca7e2687c37dea14e3f8fd8e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_scimath_impl.py @@ -0,0 +1,636 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + sqrt + log + log2 + logn + log10 + power + arccos + arcsin + arctanh + +""" +import numpy._core.numeric as nx +import numpy._core.numerictypes as nt +from numpy._core.numeric import asarray, any +from numpy._core.overrides import array_function_dispatch +from numpy.lib._type_check_impl import isreal + + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. 
+ + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. + + Examples + -------- + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. 
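+       If any element of `x` is a negative real number, the whole result
+       is returned as a complex array.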
+
+    See Also
+    --------
+    numpy.sqrt
+
+    Examples
+    --------
+    For real, non-negative inputs this works just like `numpy.sqrt`:
+
+    >>> np.emath.sqrt(1)
+    1.0
+    >>> np.emath.sqrt([1, 4])
+    array([1., 2.])
+
+    But it automatically handles negative inputs:
+
+    >>> np.emath.sqrt(-1)
+    1j
+    >>> np.emath.sqrt([-1,4])
+    array([0.+1.j, 2.+0.j])
+
+    Different results are expected because floating point 0.0 and -0.0 are
+    distinct.
+
+    For more control, explicitly use complex() as follows:
+
+    >>> np.emath.sqrt(complex(-4.0, 0.0))
+    2j
+    >>> np.emath.sqrt(complex(-4.0, -0.0))
+    -2j
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.sqrt(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log(x):
+    """
+    Compute the natural logarithm of `x`.
+
+    Return the "principal value" (for a description of this, see `numpy.log`)
+    of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+    returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+    complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+       The value(s) whose log is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log of the `x` value(s). If `x` was a scalar, so is `out`,
+       otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log
+
+    Notes
+    -----
+    For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+    (note, however, that otherwise `numpy.log` and this `log` are identical,
+    i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+    notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    >>> np.emath.log(np.exp(1))
+    1.0
+
+    Negative arguments are handled "correctly" (recall that
+    ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
+
+    >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
+    True
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log10(x):
+    """
+    Compute the logarithm base 10 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+    is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+    returns ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+       The value(s) whose log base 10 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+       otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.log10
+
+    Notes
+    -----
+    For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+    (note, however, that otherwise `numpy.log10` and this `log10` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+
+    (We set the printing precision so the example can be auto-tested)
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log10(10**1)
+    1.0
+
+    >>> np.emath.log10([-10**1, -10**2, 10**2])
+    array([1.+1.3644j, 2.+1.3644j, 2.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log10(x)
+
+
+def _logn_dispatcher(n, x):
+    return (n, x,)
+
+
+@array_function_dispatch(_logn_dispatcher)
+def logn(n, x):
+    """
+    Take log base n of x.
+
+    If `x` contains negative inputs, the answer is computed and returned in the
+    complex domain.
+
+    Parameters
+    ----------
+    n : array_like
+       The integer base(s) in which the log is taken.
+    x : array_like
+       The value(s) whose log base `n` is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log base `n` of the `x` value(s). If `x` was a scalar, so is
+       `out`, otherwise an array is returned.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.logn(2, [4, 8])
+    array([2., 3.])
+    >>> np.emath.logn(2, [-4, -8, 8])
+    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    n = _fix_real_lt_zero(n)
+    return nx.log(x)/nx.log(n)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log2(x):
+    """
+    Compute the logarithm base 2 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+    a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+    ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+       The value(s) whose log base 2 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+       otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log2
+
+    Notes
+    -----
+    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+    (note, however, that otherwise `numpy.log2` and this `log2` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    We set the printing precision so the example can be auto-tested:
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log2(8)
+    3.0
+    >>> np.emath.log2([-4, -8, 8])
+    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log2(x)
+
+
+def _power_dispatcher(x, p):
+    return (x, p)
+
+
+@array_function_dispatch(_power_dispatcher)
+def power(x, p):
+    """
+    Return x to the power p, (x**p).
+
+    If `x` contains negative values, the output is converted to the
+    complex domain.
+
+    Parameters
+    ----------
+    x : array_like
+       The input value(s).
+    p : array_like of ints
+       The power(s) to which `x` is raised. If `x` contains multiple values,
+       `p` has to either be a scalar, or contain the same number of values
+       as `x`. In the latter case, the result is
+       ``x[0]**p[0], x[1]**p[1], ...``.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
+       otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.power
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.power(2, 2)
+    4
+
+    >>> np.emath.power([2, 4], 2)
+    array([ 4, 16])
+
+    >>> np.emath.power([2, 4], -2)
+    array([0.25  , 0.0625])
+
+    >>> np.emath.power([-2, 4], 2)
+    array([ 4.-0.j, 16.+0.j])
+
+    >>> np.emath.power([2, 4], [2, 4])
+    array([  4, 256])
+
+    """
+    x = _fix_real_lt_zero(x)
+    p = _fix_int_lt_zero(p)
+    return nx.power(x, p)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arccos(x):
+    """
+    Compute the inverse cosine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+       The value(s) whose arccos is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+       is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arccos
+
+    Notes
+    -----
+    For an arccos() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arccos`.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arccos(1) # a scalar is returned
+    0.0
+
+    >>> np.emath.arccos([1,2])
+    array([0.-0.j   , 0.-1.317j])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arccos(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arcsin(x):
+    """
+    Compute the inverse sine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
+    returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+       The value(s) whose arcsin is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+       is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arcsin
+
+    Notes
+    -----
+    For an arcsin() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arcsin`.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arcsin(0)
+    0.0
+
+    >>> np.emath.arcsin([0,1])
+    array([0.    , 1.5708])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arcsin(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arctanh(x):
+    """
+    Compute the inverse hyperbolic tangent of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
+    ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
+    complex, the result is complex. Finally, `x = 1` returns ``inf`` and
+    `x = -1` returns ``-inf``.
+
+    Parameters
+    ----------
+    x : array_like
+       The value(s) whose arctanh is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+       a scalar, so is `out`, otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.arctanh
+
+    Notes
+    -----
+    For an arctanh() that returns ``NAN`` when real `x` is not in the
+    interval ``(-1,1)``, use `numpy.arctanh` (the latter, however, does
+    return +/-inf for ``x = +/-1``).
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arctanh(0.5)
+    0.5493061443340549
+
+    >>> from numpy.testing import suppress_warnings
+    >>> with suppress_warnings() as sup:
+    ...     sup.filter(RuntimeWarning)
+    ...     np.emath.arctanh(np.eye(2))
+    array([[inf,  0.],
+           [ 0., inf]])
+    >>> np.emath.arctanh([1j])
+    array([0.+0.7854j])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arctanh(x)
diff --git a/phivenv/Lib/site-packages/numpy/lib/_scimath_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_scimath_impl.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ef5cf4e4ed4f968bec2dc869a0b37ee93d862ff6
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/_scimath_impl.pyi
@@ -0,0 +1,94 @@
+from typing import overload, Any
+
+from numpy import complexfloating
+
+from numpy._typing import (
+    NDArray,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ComplexLike_co,
+    _FloatLike_co,
+)
+
+__all__: list[str]
+
+@overload
+def sqrt(x: _FloatLike_co) -> Any: ...
+@overload
+def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
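+# (a float scalar may come back real or complex depending on its sign, hence
+# the ``Any`` return; the array-like overloads below follow the same pattern)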
+@overload +def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log(x: _FloatLike_co) -> Any: ... +@overload +def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log10(x: _FloatLike_co) -> Any: ... +@overload +def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log2(x: _FloatLike_co) -> Any: ... +@overload +def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... +@overload +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... +@overload +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arccos(x: _FloatLike_co) -> Any: ... +@overload +def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arcsin(x: _FloatLike_co) -> Any: ... +@overload +def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arctanh(x: _FloatLike_co) -> Any: ... +@overload +def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
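+
+# Illustrative doctest of the promotion behaviour these stubs describe,
+# taken from the module's own examples:
+#     >>> np.emath.sqrt(-1)      # negative real input promotes to complex
+#     1j
+#     >>> np.emath.sqrt([1, 4])  # non-negative input stays real
+#     array([1., 2.])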
diff --git a/phivenv/Lib/site-packages/numpy/lib/_shape_base_impl.py b/phivenv/Lib/site-packages/numpy/lib/_shape_base_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..eb1293cb12667fae1ee4598715d558f24df0d950 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_shape_base_impl.py @@ -0,0 +1,1286 @@ +import functools +import warnings + +import numpy._core.numeric as _nx +from numpy._core.numeric import asarray, zeros, zeros_like, array, asanyarray +from numpy._core.fromnumeric import reshape, transpose +from numpy._core.multiarray import normalize_axis_index +from numpy._core._multiarray_umath import _array_converter +from numpy._core import overrides +from numpy._core import vstack, atleast_3d +from numpy._core.numeric import normalize_axis_tuple +from numpy._core.overrides import set_module +from numpy._core.shape_base import _arrays_for_stack_dispatcher +from numpy.lib._index_tricks_impl import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) + + # build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + return tuple(fancy_index) + + +def _take_along_axis_dispatcher(arr, indices, axis): + return (arr, indices) + + +@array_function_dispatch(_take_along_axis_dispatcher) +def take_along_axis(arr, indices, axis): + """ + Take values from the input array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to look up values in the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Source array + indices : ndarray (Ni..., J, Nk...) + Indices to take along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions Ni and Nj only need to broadcast + against `arr`. + axis : int + The axis to take 1d slices along. If axis is None, the input array is + treated as if it had first been flattened to 1d, for consistency with + `sort` and `argsort`. + + Returns + ------- + out: ndarray (Ni..., J, Nk...) + The indexed result. 
+ + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk) + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + out_1d = out [ii + s_[:,] + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + out_1d[:] = a_1d[indices_1d] + + See Also + -------- + take : Take along an axis, using the same indices for every 1d slice + put_along_axis : + Put values into the destination array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1) + >>> ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you maintain the trivial dimension + with ``keepdims``: + + >>> np.max(a, axis=1, keepdims=True) + array([[30], + [60]]) + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.argmin(a, axis=1, keepdims=True) + >>> ai_max = np.argmax(a, axis=1, keepdims=True) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + """ + # normalize inputs + if axis is None: + arr = arr.flat + arr_shape = (len(arr),) # flatiter has no .shape + axis = 0 + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + return arr[_make_along_axis_idx(arr_shape, indices, axis)] + + +def _put_along_axis_dispatcher(arr, indices, values, axis): + return (arr, indices, values) + + +@array_function_dispatch(_put_along_axis_dispatcher) +def put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. 
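+
+    Unlike `take_along_axis`, this function modifies `arr` in place and
+    returns ``None``.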
+ + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + arr = arr.flat + axis = 0 + arr_shape = (len(arr),) # flatiter has no .shape + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays + and `a` is a 1-D slice of `arr` along `axis`. + + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + .. versionadded:: 1.9.0 + + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... 
return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + conv = _array_converter(arr) + arr = conv[0] + + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration: + raise ValueError( + 'Cannot apply_along_axis when any iteration dimensions are 0' + ) from None + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. + # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + if not isinstance(res, matrix): + buff = zeros_like(res, shape=inarr_view.shape[:-1] + res.shape) + else: + # Matrices are nasty with reshaping, so do not preserve them here. + buff = zeros(inarr_view.shape[:-1] + res.shape, dtype=res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim-res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim-res.ndim] + ) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + res = transpose(buff, buff_permute) + return conv.wrap(res) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. 
This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ----- + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + + .. deprecated:: 1.13.0 + Passing an axis where ``axis > a.ndim`` will be treated as + ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will + be treated as ``axis == 0``. This behavior is deprecated. + + .. versionchanged:: 1.18.0 + A tuple of axes is now supported. Out of range axes as + described above are now forbidden and raise an + `~exceptions.AxisError`. + + Returns + ------- + result : ndarray + View of `a` with the number of dimensions increased. + + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> x = np.array([1, 2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + The following is equivalent to ``x[:, np.newaxis]``: + + >>> y = np.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + ``axis`` may also be a tuple: + + >>> y = np.expand_dims(x, axis=(0, 1)) + >>> y + array([[[1, 2]]]) + + >>> y = np.expand_dims(x, axis=(2, 0)) + >>> y + array([[[1], + [2]]]) + + Note that some examples may use ``None`` instead of ``np.newaxis``. 
These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + if type(axis) not in (tuple, list): + axis = (axis,) + + out_ndim = len(axis) + a.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(a.shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + + return a.reshape(shape) + + +# NOTE: Remove once deprecation period passes +@set_module("numpy") +def row_stack(tup, *, dtype=None, casting="same_kind"): + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`row_stack` alias is deprecated. " + "Use `np.vstack` directly.", + DeprecationWarning, + stacklevel=2 + ) + return vstack(tup, dtype=dtype, casting=casting) + + +row_stack.__doc__ = vstack.__doc__ + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + arrays = [] + for v in tup: + arr = asanyarray(v) + if arr.ndim < 2: + arr = array(arr, copy=None, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. + dsplit : Split array along third axis. 
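+ + Notes + ----- + As a sketch of the equivalence described above, the result is the same as + promoting each input with `atleast_3d` and concatenating along the third + axis:: + + np.concatenate([np.atleast_3d(a) for a in tup], axis=2)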
+ + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + arrs = atleast_3d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 2) + + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if _nx.ndim(sub_arys[i]) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] + + >>> x = np.arange(9) + >>> np.array_split(x, 4) + [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') from None + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section+1] + + (Nsections-extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays as views into `ary`. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. + + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays as views into `ary`. 
+ + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') from None + return array_split(ary, indices_or_sections, axis) + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis except for 1-D arrays, where it is split at ``axis=0``. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] + + With a 1-D array, the split is along axis 0. + + >>> x = np.array([0, 1, 2, 3, 4, 5]) + >>> np.hsplit(x, 2) + [array([0, 1, 2]), array([3, 4, 5])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. ``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. 
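+ + For example, assuming ``ary.ndim >= 2``, the following two calls are + interchangeable:: + + np.vsplit(ary, 2) + np.split(ary, 2, axis=0)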
+ + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + array([[12., 13., 14., 15.]]), + array([], shape=(0, 4), dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[0., 1.], + [2., 3.]]]), + array([[[4., 5.], + [6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None. + + .. deprecated:: 2.0 + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`get_array_wrap` is deprecated. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. + If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
+ The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, ..., 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, ..., 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + # Working: + # 1. Equalise the shapes by prepending smaller array with 1s + # 2. Expand shapes of both the arrays by adding new axes at + # odd positions for 1st array and even positions for 2nd + # 3. Compute the product of the modified array + # 4. The inner most array elements now contain the rows of + # the Kronecker product + # 5. Reshape the result to kron's shape, which is same as + # product of shapes of the two arrays. + b = asanyarray(b) + a = array(a, copy=None, subok=True, ndmin=b.ndim) + is_any_mat = isinstance(a, matrix) or isinstance(b, matrix) + ndb, nda = b.ndim, a.ndim + nd = max(ndb, nda) + + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + + # Equalise the shapes by prepending smaller one with 1s + as_ = (1,)*max(0, ndb-nda) + as_ + bs = (1,)*max(0, nda-ndb) + bs + + # Insert empty dimensions + a_arr = expand_dims(a, axis=tuple(range(ndb-nda))) + b_arr = expand_dims(b, axis=tuple(range(nda-ndb))) + + # Compute the product + a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2))) + b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2))) + # In case of `mat`, convert result to `array` + result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat)) + + # Reshape back + result = result.reshape(_nx.multiply(as_, bs)) + + return result if not is_any_mat else matrix(result, copy=False) + + +def _tile_dispatcher(A, reps): + return (A, reps) + + +@array_function_dispatch(_tile_dispatcher) +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. 
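+ For example, ``reps=(2, 3)`` repeats `A` twice along the first axis and + three times along the second.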
+ + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape + + Examples + -------- + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + >>> c = np.array([1,2,3,4]) + >>> np.tile(c,(4,1)) + array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): + # Fixes the problem that the function does not make a copy if A is a + # numpy array and the repetitions are 1 in all dimensions + return _nx.array(A, copy=True, subok=True, ndmin=d) + else: + # Note that no copy of zero-sized arrays is made. However since they + # have no data there is no risk of an inadvertent overwrite. + c = _nx.array(A, copy=None, subok=True, ndmin=d) + if (d < c.ndim): + tup = (1,)*(c.ndim-d) + tup + shape_out = tuple(s*t for s, t in zip(c.shape, tup)) + n = c.size + if n > 0: + for dim_in, nrep in zip(c.shape, tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + n //= dim_in + return c.reshape(shape_out) diff --git a/phivenv/Lib/site-packages/numpy/lib/_shape_base_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_shape_base_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e9d6b4c90f23cb70a15792ac076cf6092376648f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_shape_base_impl.pyi @@ -0,0 +1,202 @@ +import sys +from collections.abc import Callable, Sequence +from typing import TypeVar, Any, overload, SupportsIndex, Protocol + +if sys.version_info >= (3, 10): + from typing import ParamSpec, Concatenate +else: + from typing_extensions import ParamSpec, Concatenate + +import numpy as np +from numpy import ( + generic, + integer, + ufunc, + unsignedinteger, + signedinteger, + floating, + complexfloating, + object_, +) + +from numpy._typing import ( + ArrayLike, + NDArray, + _ShapeLike, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +from numpy._core.shape_base import vstack + +_P = ParamSpec("_P") +_SCT = TypeVar("_SCT", bound=generic) + +# Signature of `__array_wrap__` +class _ArrayWrap(Protocol): + def __call__( + self, + array: NDArray[Any], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + return_scalar: bool = ..., + /, + ) -> Any: ... + +class _SupportsArrayWrap(Protocol): + @property + def __array_wrap__(self) -> _ArrayWrap: ... + + +__all__: list[str] + +def take_along_axis( + arr: _SCT | NDArray[_SCT], + indices: NDArray[integer[Any]], + axis: None | int, +) -> NDArray[_SCT]: ... + +def put_along_axis( + arr: NDArray[_SCT], + indices: NDArray[integer[Any]], + values: ArrayLike, + axis: None | int, +) -> None: ... + +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[_SCT]: ... 
+@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[Any]: ... + +def apply_over_axes( + func: Callable[[NDArray[Any], int], NDArray[_SCT]], + a: ArrayLike, + axes: int | Sequence[int], +) -> NDArray[_SCT]: ... + +@overload +def expand_dims( + a: _ArrayLike[_SCT], + axis: _ShapeLike, +) -> NDArray[_SCT]: ... +@overload +def expand_dims( + a: ArrayLike, + axis: _ShapeLike, +) -> NDArray[Any]: ... + +@overload +def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def array_split( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_SCT]]: ... +@overload +def array_split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def split( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_SCT]]: ... +@overload +def split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def hsplit( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_SCT]]: ... +@overload +def hsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def vsplit( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_SCT]]: ... +@overload +def vsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def dsplit( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_SCT]]: ... +@overload +def dsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +@overload +def get_array_wrap(*args: object) -> None | _ArrayWrap: ... + +@overload +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +@overload +def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... + +@overload +def tile( + A: _ArrayLike[_SCT], + reps: int | Sequence[int], +) -> NDArray[_SCT]: ... +@overload +def tile( + A: ArrayLike, + reps: int | Sequence[int], +) -> NDArray[Any]: ... 
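A brief usage sketch of the typed signatures above (the array values are illustrative): the overloads encode that `take_along_axis` preserves the input's scalar type, so a float64 array indexed with `argsort` indices yields a float64 result:

    import numpy as np

    a = np.array([[10., 30., 20.], [60., 40., 50.]])
    idx = np.argsort(a, axis=1)                # integer index array
    out = np.take_along_axis(a, idx, axis=1)   # rows sorted along axis 1
    assert out.dtype == np.float64             # scalar type preserved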
diff --git a/phivenv/Lib/site-packages/numpy/lib/_stride_tricks_impl.py b/phivenv/Lib/site-packages/numpy/lib/_stride_tricks_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..a1dcaf194927d869668f9a76aa34249305767dde --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_stride_tricks_impl.py @@ -0,0 +1,558 @@ +""" +Utilities that manipulate strides to achieve desirable effects. + +An explanation of strides can be found in the :ref:`arrays.ndarray`. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + +""" +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy._core.overrides import array_function_dispatch, set_module + +__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] + + +class DummyArray: + """Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + + +def _maybe_view_as_subclass(original_array, new_array): + if type(original_array) is not type(new_array): + # if input was an ndarray subclass and subclasses were OK, + # then view the result as that subclass. + new_array = new_array.view(type=type(original_array)) + # Since we have done something akin to a view from original_array, we + # should let the subclass finalize (if it has it implemented, i.e., is + # not None). + if new_array.__array_finalize__: + new_array.__array_finalize__(original_array) + return new_array + + +@set_module("numpy.lib.stride_tricks") +def as_strided(x, shape=None, strides=None, subok=False, writeable=True): + """ + Create a view into the array with the given shape and strides. + + .. warning:: This function has to be used with extreme care, see notes. + + Parameters + ---------- + x : ndarray + Array to create a new view into. + shape : sequence of int, optional + The shape of the new array. Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + subok : bool, optional + .. versionadded:: 1.10 + + If True, subclasses are preserved. + writeable : bool, optional + .. versionadded:: 1.12 + + If set to False, the returned array will always be readonly. + Otherwise it will be writable if the original array was. It + is advisable to set this to False if possible (see Notes). + + Returns + ------- + view : ndarray + + See also + -------- + broadcast_to : broadcast an array to a given shape. + reshape : reshape an array. + lib.stride_tricks.sliding_window_view : + user-friendly and safe function for the creation of sliding window views. + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. + It is advisable to always use the original ``x.strides`` when + calculating new strides to avoid reliance on a contiguous memory + layout. + + Furthermore, arrays created with this function often contain + self-overlapping memory, so that two elements may refer to the same + memory location. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays.
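+ + As an illustrative sketch of careful usage (the shapes here are + hypothetical), derive the new strides from the original ``x.strides`` and + keep the view read-only:: + + x = np.arange(10) + windows = as_strided(x, shape=(8, 3), + strides=(x.strides[0], x.strides[0]), + writeable=False)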
+ + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _sliding_window_view_dispatcher(x, window_shape, axis=None, *, + subok=None, writeable=None): + return (x,) + + +@array_function_dispatch( + _sliding_window_view_dispatcher, module="numpy.lib.stride_tricks" +) +def sliding_window_view(x, window_shape, axis=None, *, + subok=False, writeable=False): + """ + Create a sliding window view into the array with the given window shape. + + Also known as rolling or moving window, the window slides across all + dimensions of the array and extracts subsets of the array at all window + positions. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + x : array_like + Array to create the sliding window view from. + window_shape : int or tuple of int + Size of window over each axis that takes part in the sliding window. + If `axis` is not present, must have same length as the number of input + array dimensions. Single integers `i` are treated as if they were the + tuple `(i,)`. + axis : int or tuple of int, optional + Axis or axes along which the sliding window is applied. + By default, the sliding window is applied to all axes and + `window_shape[i]` will refer to axis `i` of `x`. + If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to + the axis `axis[i]` of `x`. + Single integers `i` are treated as if they were the tuple `(i,)`. + subok : bool, optional + If True, sub-classes will be passed-through, otherwise the returned + array will be forced to be a base-class array (default). + writeable : bool, optional + When true, allow writing to the returned view. The default is false, + as this should be used with caution: the returned view contains the + same memory location multiple times, so writing to one location will + cause others to change. + + Returns + ------- + view : ndarray + Sliding window view of the array. The sliding window dimensions are + inserted at the end, and the original dimensions are trimmed as + required by the size of the sliding window. + That is, ``view.shape = x_shape_trimmed + window_shape``, where + ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less + than the corresponding window size. + + See Also + -------- + lib.stride_tricks.as_strided: A lower-level and less safe routine for + creating arbitrary views from custom shape and strides. + broadcast_to: broadcast an array to a given shape. + + Notes + ----- + For many applications using a sliding window view can be convenient, but + potentially very slow. Often specialized solutions exist, for example: + + - `scipy.signal.fftconvolve` + + - filtering functions in `scipy.ndimage` + + - moving window functions provided by + `bottleneck <https://github.com/pydata/bottleneck>`_.
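+ + For example, a moving sum over a window of length ``W`` can also be + computed without materializing the window dimension (a sketch, not a + drop-in replacement):: + + np.convolve(x, np.ones(W), mode='valid')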
+ + As a rough estimate, a sliding window approach with an input size of `N` + and a window size of `W` will scale as `O(N*W)`, whereas a specialized + algorithm can frequently achieve `O(N)`. That means that the sliding window + variant for a window size of 100 can be 100 times slower than a more + specialized version. + + Nevertheless, for small window sizes, when no custom algorithm exists, or + as a prototyping and developing tool, this function can be a good solution. + + Examples + -------- + >>> from numpy.lib.stride_tricks import sliding_window_view + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + + This also works in more dimensions, e.g. + + >>> i, j = np.ogrid[:3, :4] + >>> x = 10*i + j + >>> x.shape + (3, 4) + >>> x + array([[ 0, 1, 2, 3], + [10, 11, 12, 13], + [20, 21, 22, 23]]) + >>> shape = (2,2) + >>> v = sliding_window_view(x, shape) + >>> v.shape + (2, 3, 2, 2) + >>> v + array([[[[ 0, 1], + [10, 11]], + [[ 1, 2], + [11, 12]], + [[ 2, 3], + [12, 13]]], + [[[10, 11], + [20, 21]], + [[11, 12], + [21, 22]], + [[12, 13], + [22, 23]]]]) + + The axis can be specified explicitly: + + >>> v = sliding_window_view(x, 3, 0) + >>> v.shape + (1, 4, 3) + >>> v + array([[[ 0, 10, 20], + [ 1, 11, 21], + [ 2, 12, 22], + [ 3, 13, 23]]]) + + The same axis can be used several times. In that case, every use reduces + the corresponding original dimension: + + >>> v = sliding_window_view(x, (2, 3), (1, 1)) + >>> v.shape + (3, 1, 2, 3) + >>> v + array([[[[ 0, 1, 2], + [ 1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + + Combining with stepped slicing (`::step`), this can be used to take sliding + views which skip elements: + + >>> x = np.arange(7) + >>> sliding_window_view(x, 5)[:, ::2] + array([[0, 2, 4], + [1, 3, 5], + [2, 4, 6]]) + + or views which move by multiple elements + + >>> x = np.arange(7) + >>> sliding_window_view(x, 3)[::2, :] + array([[0, 1, 2], + [2, 3, 4], + [4, 5, 6]]) + + A common application of `sliding_window_view` is the calculation of running + statistics. The simplest example is the + `moving average <https://en.wikipedia.org/wiki/Moving_average>`_: + + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + >>> moving_average = v.mean(axis=-1) + >>> moving_average + array([1., 2., 3., 4.]) + + Note that a sliding window approach is often **not** optimal (see Notes).
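+ + As one more sketch along the same lines, a running maximum: + + >>> x = np.array([3, 1, 4, 1, 5, 9, 2, 6]) + >>> sliding_window_view(x, 3).max(axis=-1) + array([4, 4, 5, 9, 9, 9])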
+ """ + window_shape = (tuple(window_shape) + if np.iterable(window_shape) + else (window_shape,)) + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + + window_shape_array = np.array(window_shape) + if np.any(window_shape_array < 0): + raise ValueError('`window_shape` cannot contain negative values') + + if axis is None: + axis = tuple(range(x.ndim)) + if len(window_shape) != len(axis): + raise ValueError(f'Since axis is `None`, must provide ' + f'window_shape for all dimensions of `x`; ' + f'got {len(window_shape)} window_shape elements ' + f'and `x.ndim` is {x.ndim}.') + else: + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + if len(window_shape) != len(axis): + raise ValueError(f'Must provide matching length window_shape and ' + f'axis; got {len(window_shape)} window_shape ' + f'elements and {len(axis)} axes elements.') + + out_strides = x.strides + tuple(x.strides[ax] for ax in axis) + + # note: same axis can be windowed repeatedly + x_shape_trimmed = list(x.shape) + for ax, dim in zip(axis, window_shape): + if x_shape_trimmed[ax] < dim: + raise ValueError( + 'window shape cannot be larger than input array shape') + x_shape_trimmed[ax] -= dim - 1 + out_shape = tuple(x_shape_trimmed) + window_shape + return as_strided(x, strides=out_strides, shape=out_shape, + subok=subok, writeable=writeable) + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=None, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + extras = [] + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=['readonly'], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + # In a future version this will go away + if not readonly and array.flags._writeable_no_warn: + result.flags.writeable = True + result.flags._warn_on_write = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_shapes + + Notes + ----- + .. 
versionadded:: 1.10.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:32]) + # unfortunately, it cannot handle 32 or more arguments directly + for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 31)]) + return b.shape + + +@set_module('numpy') +def broadcast_shapes(*args): + """ + Broadcast the input shapes into a single shape. + + :ref:`Learn more about broadcasting here <basics.broadcasting>`. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + *args : tuples of ints, or ints + The shapes to be broadcast against each other. + + Returns + ------- + tuple + Broadcasted shape. + + Raises + ------ + ValueError + If the shapes are not compatible and cannot be broadcast according + to NumPy's broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_to + + Examples + -------- + >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) + (3, 2) + + >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) + (5, 6, 7) + """ + arrays = [np.empty(x, dtype=[]) for x in args] + return _broadcast_shape(*arrays) + + +def _broadcast_arrays_dispatcher(*args, subok=None): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, subok=False): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + *args : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : tuple of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you need + to write to the arrays, make copies first. While you can set the + ``writeable`` flag True, writing to a single output value may end up + changing more than one location in the output array. + + .. deprecated:: 1.17 + The output is currently marked so that if written to, a deprecation + warning will be emitted. A future version will set the + ``writeable`` flag False so writing to it will raise an error. + + See Also + -------- + broadcast + broadcast_to + broadcast_shapes + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + (array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])) + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 32 arrays.
+ # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + args = tuple(np.array(_m, copy=None, subok=subok) for _m in args) + + shape = _broadcast_shape(*args) + + if all(array.shape == shape for array in args): + # Common case where nothing needs to be broadcasted. + return args + + return tuple(_broadcast_to(array, shape, subok=subok, readonly=False) + for array in args) diff --git a/phivenv/Lib/site-packages/numpy/lib/_stride_tricks_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_stride_tricks_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a5170b6fac8ee6a9068964bd6c514a16e7698b2c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_stride_tricks_impl.pyi @@ -0,0 +1,80 @@ +from collections.abc import Iterable +from typing import Any, TypeVar, overload, SupportsIndex + +from numpy import generic +from numpy._typing import ( + NDArray, + ArrayLike, + _ShapeLike, + _Shape, + _ArrayLike +) + +_SCT = TypeVar("_SCT", bound=generic) + +__all__: list[str] + +class DummyArray: + __array_interface__: dict[str, Any] + base: None | NDArray[Any] + def __init__( + self, + interface: dict[str, Any], + base: None | NDArray[Any] = ..., + ) -> None: ... + +@overload +def as_strided( + x: _ArrayLike[_SCT], + shape: None | Iterable[int] = ..., + strides: None | Iterable[int] = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def as_strided( + x: ArrayLike, + shape: None | Iterable[int] = ..., + strides: None | Iterable[int] = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def sliding_window_view( + x: _ArrayLike[_SCT], + window_shape: int | Iterable[int], + axis: None | SupportsIndex = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def sliding_window_view( + x: ArrayLike, + window_shape: int | Iterable[int], + axis: None | SupportsIndex = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def broadcast_to( + array: _ArrayLike[_SCT], + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def broadcast_to( + array: ArrayLike, + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[Any]: ... + +def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... + +def broadcast_arrays( + *args: ArrayLike, + subok: bool = ..., +) -> tuple[NDArray[Any], ...]: ... 
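A short usage sketch of the stubbed API above (shapes chosen for illustration): `broadcast_shapes` computes the common shape without allocating full arrays, and `broadcast_to` returns a read-only view with that shape:

    import numpy as np

    shape = np.broadcast_shapes((3, 1), (1, 4))   # -> (3, 4)
    view = np.broadcast_to(np.arange(4), shape)   # read-only (3, 4) view
    assert view.shape == (3, 4)
    assert not view.flags.writeable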
diff --git a/phivenv/Lib/site-packages/numpy/lib/_twodim_base_impl.py b/phivenv/Lib/site-packages/numpy/lib/_twodim_base_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..529859a254edb187571650084dc1b4e6e3ee6e24 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_twodim_base_impl.py @@ -0,0 +1,1188 @@ +""" Basic functions for manipulating 2d arrays + +""" +import functools +import operator + +from numpy._core._multiarray_umath import _array_converter +from numpy._core.numeric import ( + asanyarray, arange, zeros, greater_equal, multiply, ones, + asarray, where, int8, int16, int32, int64, intp, empty, promote_types, + diagonal, nonzero, indices + ) +from numpy._core.overrides import set_array_function_like_doc, set_module +from numpy._core import overrides +from numpy._core import iinfo +from numpy.lib._stride_tricks_impl import broadcast_to + + +__all__ = [ + 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', + 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +i1 = iinfo(int8) +i2 = iinfo(int16) +i4 = iinfo(int32) + + +def _min_int(low, high): + """ get small int that fits the range """ + if high <= i1.max and low >= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def _flip_dispatcher(m): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def fliplr(m): + """ + Reverse the order of elements along axis 1 (left/right). + + For a 2-D array, this flips the entries in each row in the left/right + direction. Columns are preserved, but appear in a different order than + before. + + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. + Requires the array to be at least 2-D. + + Examples + -------- + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.fliplr(A) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.fliplr(A) == A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +@array_function_dispatch(_flip_dispatcher) +def flipud(m): + """ + Reverse the order of elements along axis 0 (up/down). + + For a 2-D array, this flips the entries in each column in the up/down + direction. Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. + Requires the array to be at least 1-D. 
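+ For a 2-D array, flipping both axes is the same as a 180 degree rotation; + as a sketch, ``np.flipud(np.fliplr(m))`` gives the same result as + ``np.rot90(m, 2)``.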
+ + Examples + -------- + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.flipud(A) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.flipud(A) == A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +@set_array_function_like_doc +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + if like is not None: + return _eye_with_like( + like, N, M=M, k=k, dtype=dtype, order=order, device=device + ) + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order, device=device) + if k >= M: + return m + # Ensure M and k are integers, so we don't get any surprise casting + # results in the expressions `M-k` and `M+1` used below. This avoids + # a problem with inputs with type (for example) np.uint64. + M = operator.index(M) + k = operator.index(k) + if k >= 0: + i = k + else: + i = (-k) * M + m[:M-k].flat[i::M+1] = 1 + return m + + +_eye_with_like = array_function_dispatch()(eye) + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. + diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. 
+ triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0]+abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n-k].flat[i::n+1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. + + Examples + -------- + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + conv = _array_converter(v) + v, = conv.as_arrays(subok=False) + v = v.ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n-k, dtype=intp) + fi = i+k+i*n + else: + i = arange(0, n+k, dtype=intp) + fi = i+(i-k)*n + res.flat[fi] = v + + return conv.wrap(res) + + +@set_array_function_like_doc +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float, *, like=None): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. + + Examples + -------- + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) + + """ + if like is not None: + return _tri_with_like(like, N, M=M, k=k, dtype=dtype) + + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M-k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +_tri_with_like = array_function_dispatch()(tri) + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. 
+ + Return a copy of an array with elements above the `k`-th diagonal zeroed. + For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two + axes. + + Parameters + ---------- + m : array_like, shape (..., M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (..., M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 0, 0, 0, 0], + [ 5, 6, 0, 0, 0], + [10, 11, 12, 0, 0], + [15, 16, 17, 18, 0]], + [[20, 0, 0, 0, 0], + [25, 26, 0, 0, 0], + [30, 31, 32, 0, 0], + [35, 36, 37, 38, 0]], + [[40, 0, 0, 0, 0], + [45, 46, 0, 0, 0], + [50, 51, 52, 0, 0], + [55, 56, 57, 58, 0]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of an array with the elements below the `k`-th diagonal + zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the + final two axes. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 1, 2, 3, 4], + [ 0, 6, 7, 8, 9], + [ 0, 0, 12, 13, 14], + [ 0, 0, 0, 18, 19]], + [[20, 21, 22, 23, 24], + [ 0, 26, 27, 28, 29], + [ 0, 0, 32, 33, 34], + [ 0, 0, 0, 38, 39]], + [[40, 41, 42, 43, 44], + [ 0, 46, 47, 48, 49], + [ 0, 0, 52, 53, 54], + [ 0, 0, 0, 58, 59]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. + Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + .. versionadded:: 1.9.0 + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. 
+ + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 # may vary + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None, + weights=None): + yield x + yield y + + # This terrible logic is adapted from the checks in histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + if N == 2: + yield from bins # bins=[x, y] + else: + yield bins + + yield weights + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, density=None, weights=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_area``. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `density` is True. If `density` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray, shape(nx, ny) + The bi-dimensional histogram of samples `x` and `y`. Values in `x` + are histogrammed along the first dimension and values in `y` are + histogrammed along the second dimension. 
xedges : ndarray, shape(nx+1,)
+        The bin edges along the first dimension.
+    yedges : ndarray, shape(ny+1,)
+        The bin edges along the second dimension.
+
+    See Also
+    --------
+    histogram : 1D histogram
+    histogramdd : Multidimensional histogram
+
+    Notes
+    -----
+    When `density` is True, then the returned histogram is the sample
+    density, defined such that the sum over bins of the product
+    ``bin_value * bin_area`` is 1.
+
+    Please note that the histogram does not follow the Cartesian convention
+    where `x` values are on the abscissa and `y` values on the ordinate
+    axis.  Rather, `x` is histogrammed along the first dimension of the
+    array (vertical), and `y` along the second dimension of the array
+    (horizontal).  This ensures compatibility with `histogramdd`.
+
+    Examples
+    --------
+    >>> from matplotlib.image import NonUniformImage
+    >>> import matplotlib.pyplot as plt
+
+    Construct a 2-D histogram with variable bin width. First define the bin
+    edges:
+
+    >>> xedges = [0, 1, 3, 5]
+    >>> yedges = [0, 2, 3, 4, 6]
+
+    Next we create a histogram H with random bin content:
+
+    >>> x = np.random.normal(2, 1, 100)
+    >>> y = np.random.normal(1, 1, 100)
+    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
+    >>> # Histogram does not follow Cartesian convention (see Notes),
+    >>> # therefore transpose H for visualization purposes.
+    >>> H = H.T
+
+    :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
+
+    >>> fig = plt.figure(figsize=(7, 3))
+    >>> ax = fig.add_subplot(131, title='imshow: square bins')
+    >>> plt.imshow(H, interpolation='nearest', origin='lower',
+    ...         extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+    <matplotlib.image.AxesImage object at 0x...>
+
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
+
+    >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
+    ...         aspect='equal')
+    >>> X, Y = np.meshgrid(xedges, yedges)
+    >>> ax.pcolormesh(X, Y, H)
+    <matplotlib.collections.QuadMesh object at 0x...>
+
+    :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
+    display actual bin edges with interpolation:
+
+    >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
+    ...         aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
+    >>> im = NonUniformImage(ax, interpolation='bilinear')
+    >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
+    >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
+    >>> im.set_data(xcenters, ycenters, H)
+    >>> ax.add_image(im)
+    >>> plt.show()
+
+    It is also possible to construct a 2-D histogram without specifying bin
+    edges:
+
+    >>> # Generate non-symmetric test data
+    >>> n = 10000
+    >>> x = np.linspace(1, 100, n)
+    >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
+    >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
+    >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
+
+    Now we can plot the histogram using
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
+    :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
+ + >>> # Plot histogram using pcolormesh + >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) + >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') + >>> ax1.plot(x, 2*np.log(x), 'k-') + >>> ax1.set_xlim(x.min(), x.max()) + >>> ax1.set_ylim(y.min(), y.max()) + >>> ax1.set_xlabel('x') + >>> ax1.set_ylabel('y') + >>> ax1.set_title('histogram2d') + >>> ax1.grid() + + >>> # Create hexbin plot for comparison + >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') + >>> ax2.plot(x, 2*np.log(x), 'k-') + >>> ax2.set_title('hexbin') + >>> ax2.set_xlim(x.min(), x.max()) + >>> ax2.set_xlabel('x') + >>> ax2.grid() + + >>> plt.show() + """ + from numpy import histogramdd + + if len(x) != len(y): + raise ValueError('x and y must have the same length.') + + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = asarray(bins) + bins = [xedges, yedges] + hist, edges = histogramdd([x, y], bins, range, density, weights) + return hist, edges[0], edges[1] + + +@set_module('numpy') +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of size + ``(n, n)`` with a possible offset argument `k`, when called as + ``mask_func(a, k)`` returns a new array with zeros in certain locations + (functions like `triu` or `tril` do precisely this). Then this function + returns the indices where the non-zero values would be located. + + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n, n). + mask_func : callable + A function whose call signature is similar to that of `triu`, `tril`. + That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. + + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return nonzero(a != 0) + + +@set_module('numpy') +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple of arrays + The indices for the triangle. 
The returned tuple contains two arrays, + each with the indices along one dimension of the array. + + See also + -------- + triu_indices : similar function, for upper-triangular. + mask_indices : generic function accepting an arbitrary mask function. + tril, triu + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = np.tril_indices(4) + >>> il2 = np.tril_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[il1] + array([ 0, 4, 5, ..., 13, 14, 15]) + + And for assigning values: + + >>> a[il1] = -1 + >>> a + array([[-1, 1, 2, 3], + [-1, -1, 6, 7], + [-1, -1, -1, 11], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 3], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + """ + tri_ = tri(n, m, k=k, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +def _trilu_indices_form_dispatcher(arr, k=None): + return (arr,) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def tril_indices_from(arr, k=0): + """ + Return the indices for the lower-triangle of arr. + + See `tril_indices` for full details. + + Parameters + ---------- + arr : array_like + The indices will be valid for square arrays whose dimensions are + the same as arr. + k : int, optional + Diagonal offset (see `tril` for details). + + Examples + -------- + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Pass the array to get the indices of the lower triangular elements. + + >>> trili = np.tril_indices_from(a) + >>> trili + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + >>> a[trili] + array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + + This is syntactic sugar for tril_indices(). + + >>> np.tril_indices(a.shape[0]) + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + Use the `k` parameter to return the indices for the lower triangular array + up to the k-th diagonal. + + >>> trili1 = np.tril_indices_from(a, k=1) + >>> a[trili1] + array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]) + + See Also + -------- + tril_indices, tril, triu_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +@set_module('numpy') +def triu_indices(n, k=0, m=None): + """ + Return the indices for the upper-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Diagonal offset (see `triu` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple, shape(2) of ndarrays, shape(`n`) + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. 
Can be used + to slice a ndarray of shape(`n`, `n`). + + See also + -------- + tril_indices : similar function, for lower-triangular. + mask_indices : generic function accepting an arbitrary mask function. + triu, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = np.triu_indices(4) + >>> iu2 = np.triu_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[iu1] + array([ 0, 1, 2, ..., 10, 11, 15]) + + And for assigning values: + + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 4, -1, -1, -1], + [ 8, 9, -1, -1], + [12, 13, 14, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 4, -1, -1, -10], + [ 8, 9, -1, -1], + [ 12, 13, 14, -1]]) + + """ + tri_ = ~tri(n, m, k=k - 1, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def triu_indices_from(arr, k=0): + """ + Return the indices for the upper-triangle of arr. + + See `triu_indices` for full details. + + Parameters + ---------- + arr : ndarray, shape(N, N) + The indices will be valid for square arrays. + k : int, optional + Diagonal offset (see `triu` for details). + + Returns + ------- + triu_indices_from : tuple, shape(2) of ndarray, shape(N) + Indices for the upper-triangle of `arr`. + + Examples + -------- + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Pass the array to get the indices of the upper triangular elements. + + >>> triui = np.triu_indices_from(a) + >>> triui + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + >>> a[triui] + array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + + This is syntactic sugar for triu_indices(). + + >>> np.triu_indices(a.shape[0]) + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + Use the `k` parameter to return the indices for the upper triangular array + from the k-th diagonal. + + >>> triuim1 = np.triu_indices_from(a, k=1) + >>> a[triuim1] + array([ 1, 2, 3, 6, 7, 11]) + + + See Also + -------- + triu_indices, triu, tril_indices_from + + Notes + ----- + .. 
versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/phivenv/Lib/site-packages/numpy/lib/_twodim_base_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_twodim_base_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..38ab1973c73fb07c6aa683fe9c3ec7960920dea9 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_twodim_base_impl.pyi @@ -0,0 +1,243 @@ +import builtins +from collections.abc import Callable, Sequence +from typing import ( + Any, + overload, + TypeVar, + Literal as L, +) + +import numpy as np +from numpy import ( + generic, + number, + timedelta64, + datetime64, + int_, + intp, + float64, + signedinteger, + floating, + complexfloating, + object_, + _OrderCF, +) + +from numpy._typing import ( + DTypeLike, + _DTypeLike, + ArrayLike, + _ArrayLike, + NDArray, + _SupportsArrayFunc, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +_T = TypeVar("_T") +_SCT = TypeVar("_SCT", bound=generic) + +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc = Callable[ + [NDArray[int_], _T], + NDArray[number[Any] | np.bool | timedelta64 | datetime64 | object_], +] + +__all__: list[str] + +@overload +def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def fliplr(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def flipud(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + device: None | L["cpu"] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: _DTypeLike[_SCT] = ..., + order: _OrderCF = ..., + *, + device: None | L["cpu"] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + device: None | L["cpu"] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[float64]: ... +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: _DTypeLike[_SCT] = ..., + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[_SCT]: ... +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: DTypeLike = ..., + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[Any]: ... + +@overload +def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeInt_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[signedinteger[Any]]: ... 
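+# Hypothetical usage note (illustrative, not part of the stub): the vander
+# overloads resolve on the element type of ``x``; an int array-like hits the
+# signedinteger overload above, while float, complex, and object array-likes
+# fall through to the overloads that follow, e.g.
+#   reveal_type(np.vander([1, 2, 3]))    # ndarray[..., dtype[signedinteger[Any]]]
+#   reveal_type(np.vander([1.0, 2.0]))   # ndarray[..., dtype[floating[Any]]]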
+@overload +def vander( # type: ignore[misc] + x: _ArrayLikeFloat_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[floating[Any]]: ... +@overload +def vander( + x: _ArrayLikeComplex_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def vander( + x: _ArrayLikeObject_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[object_]: ... + +@overload +def histogram2d( # type: ignore[misc] + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + bins: int | Sequence[int] = ..., + range: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[floating[Any]], + NDArray[floating[Any]], +]: ... +@overload +def histogram2d( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + bins: int | Sequence[int] = ..., + range: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complexfloating[Any, Any]], + NDArray[complexfloating[Any, Any]], +]: ... +@overload # TODO: Sort out `bins` +def histogram2d( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + bins: Sequence[_ArrayLikeInt_co], + range: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[Any], + NDArray[Any], +]: ... + +# NOTE: we're assuming/demanding here the `mask_func` returns +# an ndarray of shape `(n, n)`; otherwise there is the possibility +# of the output tuple having more or less than 2 elements +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[int], + k: int = ..., +) -> tuple[NDArray[intp], NDArray[intp]]: ... +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[_T], + k: _T, +) -> tuple[NDArray[intp], NDArray[intp]]: ... + +def tril_indices( + n: int, + k: int = ..., + m: None | int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def tril_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices( + n: int, + k: int = ..., + m: None | int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_type_check_impl.py b/phivenv/Lib/site-packages/numpy/lib/_type_check_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd2f916e6b5c87c07a7b19ed330634a07d66975 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_type_check_impl.py @@ -0,0 +1,699 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +import functools + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'mintypecode', + 'common_type'] + +from .._utils import set_module +import numpy._core.numeric as _nx +from numpy._core.numeric import asarray, asanyarray, isnan, zeros +from numpy._core import overrides, getlimits +from ._ufunclike_impl import isneginf, isposinf + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. 
+ + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. + + See Also + -------- + dtype + + Examples + -------- + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars) + intersection = set(t for t in typecodes if t in typeset) + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + return min(intersection, key=_typecodes_by_elsize.index) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([1., 3., 5.]) + >>> a.real = 9 + >>> a + array([9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([1. +8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. 
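+
+    Notes
+    -----
+    For input with a real dtype the result is False everywhere, because the
+    imaginary part is identically zero; a minimal illustration:
+
+    >>> np.iscomplex(np.array([1.0, 2.0]))
+    array([False, False])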
+ + Examples + -------- + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero imaginary part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. + + Notes + ----- + `isreal` may behave unexpectedly for string or object arrays (see examples) + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. + + Examples + -------- + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> np.isreal(a) + array([False, True, True, True, True, False]) + + The function does not work on string arrays. + + >>> a = np.array([2j, "a"], dtype="U") + >>> np.isreal(a) # Warns about non-elementwise comparison + False + + Returns True for all elements in input array of ``dtype=object`` even if + any of the elements is complex. + + >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> np.isreal(a) + array([ True, True, True]) + + isreal should not be used with object arrays + + >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> np.isreal(a) + array([ True, True]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is a not complex type or an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. 
+ + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + + Examples + -------- + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy._core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): + """ + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords. + + If `x` is inexact, NaN is replaced by zero or by the user defined value in + `nan` keyword, infinity is replaced by the largest finite floating point + values representable by ``x.dtype`` or by the user defined value in + `posinf` keyword and -infinity is replaced by the most negative finite + floating point values representable by ``x.dtype`` or by the user defined + value in `neginf` keyword. + + For complex dtypes, the above is applied to each of the real and + imaginary components of `x` separately. + + If `x` is not inexact, then no replacements are made. + + Parameters + ---------- + x : scalar or array_like + Input data. + copy : bool, optional + Whether to create a copy of `x` (True) or to replace values + in-place (False). The in-place operation only occurs if + casting to an array does not require a copy. + Default is True. + + .. versionadded:: 1.13 + nan : int, float, optional + Value to be used to fill NaN values. If no value is passed + then NaN values will be replaced with 0.0. + + .. versionadded:: 1.17 + posinf : int, float, optional + Value to be used to fill positive infinity values. If no value is + passed then positive infinity values will be replaced with a very + large number. + + .. versionadded:: 1.17 + neginf : int, float, optional + Value to be used to fill negative infinity values. If no value is + passed then negative infinity values will be replaced with a very + small (or negative) number. + + .. versionadded:: 1.17 + + + + Returns + ------- + out : ndarray + `x`, with the non-finite values replaced. If `copy` is False, this may + be `x` itself. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity. + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + isfinite : Shows which elements are finite (not NaN, not infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. 
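+
+    Since integer dtypes cannot represent NaN or infinity, integer input is
+    returned unchanged; a minimal illustration:
+
+    >>> np.nan_to_num(np.array([1, 2, 3]))
+    array([1, 2, 3])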
+
+    Examples
+    --------
+    >>> np.nan_to_num(np.inf)
+    1.7976931348623157e+308
+    >>> np.nan_to_num(-np.inf)
+    -1.7976931348623157e+308
+    >>> np.nan_to_num(np.nan)
+    0.0
+    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+    >>> np.nan_to_num(x)
+    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
+           -1.28000000e+002,  1.28000000e+002])
+    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
+           -1.2800000e+02,  1.2800000e+02])
+    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+    >>> np.nan_to_num(y)
+    array([  1.79769313e+308 +0.00000000e+000j, # may vary
+             0.00000000e+000 +0.00000000e+000j,
+             0.00000000e+000 +1.79769313e+308j])
+    >>> np.nan_to_num(y, nan=111111, posinf=222222)
+    array([222222.+111111.j, 111111.     +0.j, 111111.+222222.j])
+    """
+    x = _nx.array(x, subok=True, copy=copy)
+    xtype = x.dtype.type
+
+    isscalar = (x.ndim == 0)
+
+    if not issubclass(xtype, _nx.inexact):
+        return x[()] if isscalar else x
+
+    iscomplex = issubclass(xtype, _nx.complexfloating)
+
+    dest = (x.real, x.imag) if iscomplex else (x,)
+    maxf, minf = _getmaxmin(x.real.dtype)
+    if posinf is not None:
+        maxf = posinf
+    if neginf is not None:
+        minf = neginf
+    for d in dest:
+        idx_nan = isnan(d)
+        idx_posinf = isposinf(d)
+        idx_neginf = isneginf(d)
+        _nx.copyto(d, nan, where=idx_nan)
+        _nx.copyto(d, maxf, where=idx_posinf)
+        _nx.copyto(d, minf, where=idx_neginf)
+    return x[()] if isscalar else x
+
+#-----------------------------------------------------------------------------
+
+def _real_if_close_dispatcher(a, tol=None):
+    return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
+    """
+    If input is complex with all imaginary parts close to zero, return
+    real parts.
+
+    "Close to zero" is defined as `tol` * (machine epsilon of the type for
+    `a`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    tol : float
+        Tolerance in machine epsilons for the complex part of the elements
+        in the array. If the tolerance is <=1, then the absolute tolerance
+        is used.
+
+    Returns
+    -------
+    out : ndarray
+        If `a` is real, the type of `a` is used for the output. If `a`
+        has complex elements, the returned type is float.
+
+    See Also
+    --------
+    real, imag, angle
+
+    Notes
+    -----
+    Machine epsilon varies from machine to machine and between data types
+    but Python floats on most platforms have a machine epsilon equal to
+    2.2204460492503131e-16.  You can use 'np.finfo(float).eps' to print
+    out the machine epsilon for floats.
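+
+    With ``tol`` <= 1 the value is interpreted as an absolute tolerance on
+    the imaginary parts; a minimal illustration:
+
+    >>> np.real_if_close([2.1 + 4e-14j], tol=1e-13)
+    array([2.1])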
+
+    Examples
+    --------
+    >>> np.finfo(float).eps
+    2.2204460492503131e-16 # may vary
+
+    >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
+    array([2.1, 5.2])
+    >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
+    array([2.1+4.e-13j, 5.2+3.e-15j])
+
+    """
+    a = asanyarray(a)
+    type_ = a.dtype.type
+    if not issubclass(type_, _nx.complexfloating):
+        return a
+    if tol > 1:
+        f = getlimits.finfo(type_)
+        tol = f.eps * tol
+    if _nx.all(_nx.absolute(a.imag) < tol):
+        a = a.real
+    return a
+
+
+#-----------------------------------------------------------------------------
+
+_namefromtype = {'S1': 'character',
+                 '?': 'bool',
+                 'b': 'signed char',
+                 'B': 'unsigned char',
+                 'h': 'short',
+                 'H': 'unsigned short',
+                 'i': 'integer',
+                 'I': 'unsigned integer',
+                 'l': 'long integer',
+                 'L': 'unsigned long integer',
+                 'q': 'long long integer',
+                 'Q': 'unsigned long long integer',
+                 'f': 'single precision',
+                 'd': 'double precision',
+                 'g': 'long precision',
+                 'F': 'complex single precision',
+                 'D': 'complex double precision',
+                 'G': 'complex long double precision',
+                 'S': 'string',
+                 'U': 'unicode',
+                 'V': 'void',
+                 'O': 'object'
+                 }
+
+@set_module('numpy')
+def typename(char):
+    """
+    Return a description for the given data type code.
+
+    Parameters
+    ----------
+    char : str
+        Data type code.
+
+    Returns
+    -------
+    out : str
+        Description of the input data type code.
+
+    See Also
+    --------
+    dtype
+
+    Examples
+    --------
+    >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
+    ...              'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
+    >>> for typechar in typechars:
+    ...     print(typechar, ' : ', np.typename(typechar))
+    ...
+    S1  :  character
+    ?  :  bool
+    B  :  unsigned char
+    D  :  complex double precision
+    G  :  complex long double precision
+    F  :  complex single precision
+    I  :  unsigned integer
+    H  :  unsigned short
+    L  :  unsigned long integer
+    O  :  object
+    Q  :  unsigned long long integer
+    S  :  string
+    U  :  unicode
+    V  :  void
+    b  :  signed char
+    d  :  double precision
+    g  :  long precision
+    f  :  single precision
+    i  :  integer
+    h  :  short
+    l  :  long integer
+    q  :  long long integer
+
+    """
+    return _namefromtype[char]
+
+#-----------------------------------------------------------------------------
+
+
+#determine the "minimum common type" for a group of arrays.
+array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble],
+              [None, _nx.complex64, _nx.complex128, _nx.clongdouble]]
+array_precision = {_nx.float16: 0,
+                   _nx.float32: 1,
+                   _nx.float64: 2,
+                   _nx.longdouble: 3,
+                   _nx.complex64: 1,
+                   _nx.complex128: 2,
+                   _nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+    return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
+def common_type(*arrays):
+    """
+    Return a scalar type which is common to the input arrays.
+
+    The return type will always be an inexact (i.e. floating point) scalar
+    type, even if all the arrays are integer arrays. If one of the inputs is
+    an integer array, the minimum precision type that is returned is a
+    64-bit floating point dtype.
+
+    All input arrays except int64 and uint64 can be safely cast to the
+    returned dtype without loss of information.
+
+    Parameters
+    ----------
+    array1, array2, ... : ndarrays
+        Input arrays.
+
+    Returns
+    -------
+    out : data type code
+        Data type code.
+
+    See Also
+    --------
+    dtype, mintypecode
+
+    Examples
+    --------
+    >>> np.common_type(np.arange(2, dtype=np.float32))
+    <class 'numpy.float32'>
+    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+    <class 'numpy.float64'>
+    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+    <class 'numpy.complex128'>
+
+    """
+    is_complex = False
+    precision = 0
+    for a in arrays:
+        t = a.dtype.type
+        if iscomplexobj(a):
+            is_complex = True
+        if issubclass(t, _nx.integer):
+            p = 2  # array_precision[_nx.double]
+        else:
+            p = array_precision.get(t, None)
+            if p is None:
+                raise TypeError("can't get common type for non-numeric array")
+        precision = max(precision, p)
+    if is_complex:
+        return array_type[1][precision]
+    else:
+        return array_type[0][precision]
diff --git a/phivenv/Lib/site-packages/numpy/lib/_type_check_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_type_check_impl.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..511eaa0e75b68f8d5b1e404f228831e47ffcbb26
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/_type_check_impl.pyi
@@ -0,0 +1,204 @@
+from collections.abc import Container, Iterable
+from typing import (
+    Literal as L,
+    Any,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+import numpy as np
+from numpy import (
+    dtype,
+    generic,
+    floating,
+    float64,
+    complexfloating,
+    integer,
+)
+
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NBitBase,
+    NDArray,
+    _64Bit,
+    _SupportsDType,
+    _ScalarLike_co,
+    _ArrayLike,
+    _DTypeLikeComplex,
+)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+class _SupportsReal(Protocol[_T_co]):
+    @property
+    def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+    @property
+    def imag(self) -> _T_co: ...
+
+__all__: list[str]
+
+def mintypecode(
+    typechars: Iterable[str | ArrayLike],
+    typeset: Container[str] = ...,
+    default: str = ...,
+) -> str: ...
+
+@overload
+def real(val: _SupportsReal[_T]) -> _T: ...
+@overload
+def real(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def imag(val: _SupportsImag[_T]) -> _T: ...
+@overload
+def imag(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def iscomplex(x: _ScalarLike_co) -> np.bool: ...  # type: ignore[misc]
+@overload
+def iscomplex(x: ArrayLike) -> NDArray[np.bool]: ...
+
+@overload
+def isreal(x: _ScalarLike_co) -> np.bool: ...  # type: ignore[misc]
+@overload
+def isreal(x: ArrayLike) -> NDArray[np.bool]: ...
+
+def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+@overload
+def nan_to_num(  # type: ignore[misc]
+    x: _SCT,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> _SCT: ...
+@overload
+def nan_to_num(
+    x: _ScalarLike_co,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> Any: ...
+@overload
+def nan_to_num(
+    x: _ArrayLike[_SCT],
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def nan_to_num(
+    x: ArrayLike,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> NDArray[Any]: ...
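+
+# Hypothetical usage note (illustrative, not part of the stub): under the
+# nan_to_num overloads above, a scalar generic keeps its exact type while
+# generic array-likes widen:
+#   reveal_type(np.nan_to_num(np.float64(np.inf)))  # float64
+#   reveal_type(np.nan_to_num([np.inf, np.nan]))    # ndarray[Any, dtype[Any]]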
+ +# If one passes a complex array to `real_if_close`, then one is reasonably +# expected to verify the output dtype (so we can return an unsafe union here) + +@overload +def real_if_close( # type: ignore[misc] + a: _ArrayLike[complexfloating[_NBit1, _NBit1]], + tol: float = ..., +) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ... +@overload +def real_if_close( + a: _ArrayLike[_SCT], + tol: float = ..., +) -> NDArray[_SCT]: ... +@overload +def real_if_close( + a: ArrayLike, + tol: float = ..., +) -> NDArray[Any]: ... + +@overload +def typename(char: L['S1']) -> L['character']: ... +@overload +def typename(char: L['?']) -> L['bool']: ... +@overload +def typename(char: L['b']) -> L['signed char']: ... +@overload +def typename(char: L['B']) -> L['unsigned char']: ... +@overload +def typename(char: L['h']) -> L['short']: ... +@overload +def typename(char: L['H']) -> L['unsigned short']: ... +@overload +def typename(char: L['i']) -> L['integer']: ... +@overload +def typename(char: L['I']) -> L['unsigned integer']: ... +@overload +def typename(char: L['l']) -> L['long integer']: ... +@overload +def typename(char: L['L']) -> L['unsigned long integer']: ... +@overload +def typename(char: L['q']) -> L['long long integer']: ... +@overload +def typename(char: L['Q']) -> L['unsigned long long integer']: ... +@overload +def typename(char: L['f']) -> L['single precision']: ... +@overload +def typename(char: L['d']) -> L['double precision']: ... +@overload +def typename(char: L['g']) -> L['long precision']: ... +@overload +def typename(char: L['F']) -> L['complex single precision']: ... +@overload +def typename(char: L['D']) -> L['complex double precision']: ... +@overload +def typename(char: L['G']) -> L['complex long double precision']: ... +@overload +def typename(char: L['S']) -> L['string']: ... +@overload +def typename(char: L['U']) -> L['unicode']: ... +@overload +def typename(char: L['V']) -> L['void']: ... +@overload +def typename(char: L['O']) -> L['object']: ... + +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + integer[Any] + ]] +) -> type[floating[_64Bit]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + floating[_NBit1] + ]] +) -> type[floating[_NBit1]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + integer[Any] | floating[_NBit1] + ]] +) -> type[floating[_NBit1 | _64Bit]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + floating[_NBit1] | complexfloating[_NBit2, _NBit2] + ]] +) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ... +@overload +def common_type( + *arrays: _SupportsDType[dtype[ + integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2] + ]] +) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_ufunclike_impl.py b/phivenv/Lib/site-packages/numpy/lib/_ufunclike_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc45bf4c9087bdfdc75f20d49ce89f6caf6a1a5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_ufunclike_impl.py @@ -0,0 +1,206 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. 
+ +""" +__all__ = ['fix', 'isneginf', 'isposinf'] + +import numpy._core.numeric as nx +from numpy._core.overrides import array_function_dispatch +import warnings +import functools + + +def _dispatcher(x, out=None): + return (x, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def fix(x, out=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values are returned as floats. + + Parameters + ---------- + x : array_like + An array of floats to be rounded + out : ndarray, optional + A location into which the result is stored. If provided, it must have + a shape that the input broadcasts to. If not provided or None, a + freshly-allocated array is returned. + + Returns + ------- + out : ndarray of floats + A float array with the same dimensions as the input. + If second argument is not supplied then a float array is returned + with the rounded values. + + If a second argument is supplied the result is stored there. + The return value `out` is then a reference to that array. + + See Also + -------- + rint, trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3.0 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + # promote back to an array if flattened + res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) + + # when no out argument is passed and no subclasses are involved, flatten + # scalars + if out is None and type(res) is nx.ndarray: + res = res[()] + return res + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). 
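+
+    In particular, NaN compares as not infinite, so it is never reported as
+    positive infinity; a minimal illustration:
+
+    >>> np.isposinf(np.nan)
+    False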
+ + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> np.isposinf(np.inf) + True + >>> np.isposinf(-np.inf) + False + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. + + Examples + -------- + >>> np.isneginf(-np.inf) + True + >>> np.isneginf(np.inf) + False + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/phivenv/Lib/site-packages/numpy/lib/_ufunclike_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_ufunclike_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..42549d4fc841acaa2181c3eb4a391e9e123c82b2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_ufunclike_impl.pyi @@ -0,0 +1,67 @@ +from typing import Any, overload, TypeVar + +import numpy as np +from numpy import floating, object_ +from numpy._typing import ( + NDArray, + _FloatLike_co, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, +) + +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +__all__: list[str] + +@overload +def fix( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> floating[Any]: ... 
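+# Hypothetical usage note (illustrative, not part of the stub): the scalar
+# overload above lets a plain Python float stay a scalar, while the
+# array-like overloads below produce ndarrays:
+#   reveal_type(np.fix(3.14))         # floating[Any]
+#   reveal_type(np.fix([3.14, 2.7]))  # ndarray[Any, dtype[floating[Any]]]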
+@overload +def fix( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def fix( + x: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def fix( + x: _ArrayLikeFloat_co | _ArrayLikeObject_co, + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> np.bool: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> np.bool: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: _ArrayType, +) -> _ArrayType: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_user_array_impl.py b/phivenv/Lib/site-packages/numpy/lib/_user_array_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..605e662380ce61372a4e163bf83571e19c5bab48 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_user_array_impl.py @@ -0,0 +1,289 @@ +""" +Container class for backward compatibility with NumArray. + +The user_array.container class exists for backward compatibility with NumArray +and is not meant to be used in new code. If you need to create an array +container class, we recommend either creating a class that wraps an ndarray +or subclasses ndarray. + +""" +from numpy._core import ( + array, asarray, absolute, add, subtract, multiply, divide, + remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, + bitwise_xor, invert, less, less_equal, not_equal, equal, greater, + greater_equal, shape, reshape, arange, sin, sqrt, transpose +) + + +class container: + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. 
+ + Methods + ------- + copy + tostring + byteswap + astype + + """ + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __div__(self, other): + return self._rc(divide(self.array, asarray(other))) + + def __rdiv__(self, other): + return self._rc(divide(asarray(other), self.array)) + + def __idiv__(self, other): + divide(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def __rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return 
self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tostring(self): + "" + return self.array.tostring() + + def tobytes(self): + "" + return self.array.tobytes() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2))
+    print(less(ua_small, 103), type(less(ua_small, 103)))
+    print(type(ua_small * reshape(arange(15), shape(ua_small))))
+    print(reshape(ua_small, (5, 3)))
+    print(transpose(ua_small))
diff --git a/phivenv/Lib/site-packages/numpy/lib/_utils_impl.py b/phivenv/Lib/site-packages/numpy/lib/_utils_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f2b050eb92d10339ca8c0137dabdf45c3dae8f9
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/_utils_impl.py
@@ -0,0 +1,771 @@
+import os
+import sys
+import textwrap
+import types
+import re
+import warnings
+import functools
+import platform
+
+from numpy._core import ndarray
+from numpy._utils import set_module
+import numpy as np
+
+__all__ = [
+    'get_include', 'info', 'show_runtime'
+]
+
+
+@set_module('numpy')
+def show_runtime():
+    """
+    Print information about various resources in the system,
+    including available intrinsic support and the BLAS/LAPACK library
+    in use.
+
+    .. versionadded:: 1.24.0
+
+    See Also
+    --------
+    show_config : Show libraries in the system on which NumPy was built.
+
+    Notes
+    -----
+    1. Information is derived with the help of the `threadpoolctl
+       <https://pypi.org/project/threadpoolctl/>`_ library if available.
+    2. SIMD related information is derived from ``__cpu_features__``,
+       ``__cpu_baseline__`` and ``__cpu_dispatch__``.
+
+    """
+    from numpy._core._multiarray_umath import (
+        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+    )
+    from pprint import pprint
+    config_found = [{
+        "numpy_version": np.__version__,
+        "python": sys.version,
+        "uname": platform.uname(),
+    }]
+    features_found, features_not_found = [], []
+    for feature in __cpu_dispatch__:
+        if __cpu_features__[feature]:
+            features_found.append(feature)
+        else:
+            features_not_found.append(feature)
+    config_found.append({
+        "simd_extensions": {
+            "baseline": __cpu_baseline__,
+            "found": features_found,
+            "not_found": features_not_found
+        }
+    })
+    try:
+        from threadpoolctl import threadpool_info
+        config_found.extend(threadpool_info())
+    except ImportError:
+        print("WARNING: `threadpoolctl` not found in system!"
+              " Install it by `pip install threadpoolctl`."
+              " Once installed, try `np.show_runtime` again"
+              " for more detailed build information")
+    pprint(config_found)
+
+
+@set_module('numpy')
+def get_include():
+    """
+    Return the directory that contains the NumPy \\*.h header files.
+
+    Extension modules that need to compile against NumPy may need to use this
+    function to locate the appropriate include directory.
+
+    Notes
+    -----
+    When using ``setuptools``, for example in ``setup.py``::
+
+        import numpy as np
+        ...
+        Extension('extension_name', ...
+                  include_dirs=[np.get_include()])
+        ...
+
+    Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0; using
+    that is likely preferred for build systems other than ``setuptools``::
+
+        $ numpy-config --cflags
+        -I/path/to/site-packages/numpy/_core/include
+
+        # Or rely on pkg-config:
+        $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)
+        $ pkg-config --cflags
+        -I/path/to/site-packages/numpy/_core/include
+
+    """
+    import numpy
+    if numpy.show_config is None:
+        # running from numpy source directory
+        d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')
+    else:
+        # using installed numpy core headers
+        import numpy._core as _core
+        d = os.path.join(os.path.dirname(_core.__file__), 'include')
+    return d
+
+
+class _Deprecate:
+    """
+    Decorator class to deprecate old functions.
+
+    Refer to `deprecate` for details.
+ + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + if old_name is None: + old_name = func.__name__ + if new_name is None: + depdoc = "`%s` is deprecated!" % old_name + else: + depdoc = "`%s` is deprecated, use `%s` instead!" % \ + (old_name, new_name) + + if message is not None: + depdoc += "\n" + message + + @functools.wraps(func) + def newfunc(*args, **kwds): + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc.__name__ = old_name + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. + doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. + skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) + doc = '\n\n'.join([depdoc, doc]) + newfunc.__doc__ = doc + + return newfunc + + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.lib.utils.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! # may vary + >>> olduint(6) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. 
" + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a + :exc:`DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + return _Deprecate(message=msg) + + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. + +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " "*(firstwidth+2) + argument + else: + newstr = newstr + addstr + argument + return newstr + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__:module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=None): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. 
+ + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + if output is None: + output = sys.stdout + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print("%s%s%s" % (tic, sys.byteorder, tic), file=output) + byteswap = False + elif endian == '>': + print("%sbig%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "big" + else: + print("%slittle%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print("type: %s" % obj.dtype, file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=None, toplevel='numpy'): + """ + Get help information for an array, function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is + an `ndarray` instance, information about the array is printed. + If `object` is a numpy object, its docstring is given. If it is + a string, available modules are searched for matching objects. + If None, information about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``None``, in which case ``sys.stdout`` will be used. + The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + When the argument is an array, information about the array is printed. + + >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64) + >>> np.info(a) + class: ndarray + shape: (2, 3) + strides: (24, 8) + itemsize: 8 + aligned: True + contiguous: True + fortran: False + data pointer: 0x562b6e0d2860 # may vary + byteorder: little + byteswap: False + type: complex64 + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. 
+ import pydoc + import inspect + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if output is None: + output = sys.stdout + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print("\n " + "*** Repeat reference found in %s *** " % namestr, + file=output + ) + else: + objlist.append(id(obj)) + print(" *** Found in %s ***" % namestr, file=output) + info(obj) + print("-"*maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print("Help for %s not found." % object, file=output) + else: + print("\n " + "*** Total of %d references found. ***" % numfound, + file=output + ) + + elif inspect.isfunction(object) or inspect.ismethod(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + + public_methods = [meth for meth in methods if meth[0] != '_'] + if public_methods: + print("\n\nMethods:\n", file=output) + for meth in public_methods: + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(" %s -- %s" % (meth, methstr), file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +def safe_eval(source): + """ + Protected string evaluation. + + .. deprecated:: 2.0 + Use `ast.literal_eval` instead. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + .. warning:: + + This function is identical to :py:meth:`ast.literal_eval` and + has the same security implications. It may not always be safe + to evaluate large input strings. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... + SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... 
+ ValueError: malformed node or string: <_ast.Call object at 0x...> + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`safe_eval` is deprecated. Use `ast.literal_eval` instead. " + "Be aware of security implications, such as memory exhaustion " + "based attacks (deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Sorted input data to median function + result : Array or MaskedArray + Result of median function. + axis : int + Axis along which the median was computed. + + Returns + ------- + result : scalar or ndarray + Median or NaN in axes which contained NaN in the input. If the input + was an array, NaN will be inserted in-place. If a scalar, either the + input itself or a scalar NaN. + """ + if data.size == 0: + return result + potential_nans = data.take(-1, axis=axis) + n = np.isnan(potential_nans) + # masked NaN values are ok, although for masked the copyto may fail for + # unmasked ones (this was always broken) when the result is a scalar. + if np.ma.isMaskedArray(n): + n = n.filled(False) + + if not n.any(): + return result + + # Without given output, it is possible that the current result is a + # numpy scalar, which is not writeable. If so, just return nan. + if isinstance(result, np.generic): + return potential_nans + + # Otherwise copy NaNs (if there are any) + np.copyto(result, potential_nans, where=n) + return result + +def _opt_info(): + """ + Returns a string containing the CPU features supported + by the current build. + + The format of the string can be explained as follows: + - Dispatched features supported by the running machine end with `*`. + - Dispatched features not supported by the running machine + end with `?`. + - Remaining features represent the baseline. + + Returns: + str: A formatted string indicating the supported CPU features. + """ + from numpy._core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" + + return enabled_features + +def drop_metadata(dtype, /): + """ + Returns the dtype unchanged if it contained no metadata or a copy of the + dtype if it (or any of its structure dtypes) contained metadata. + + This utility is used by `np.save` and `np.savez` to drop metadata before + saving. + + .. note:: + + Due to its limitation this function may move to a more appropriate + home or change in the future and is considered semi-public API only. + + .. warning:: + + This function does not preserve more strange things like record dtypes + and user dtypes may simply return the wrong thing. If you need to be + sure about the latter, check the result with: + ``np.can_cast(new_dtype, dtype, casting="no")``. 
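+
+    A sketch of the intended behaviour (illustrative)::
+
+        dt = np.dtype(np.float64, metadata={'unit': 'm'})
+        assert drop_metadata(dt).metadata is None
+        plain = np.dtype(np.float64)
+        assert drop_metadata(plain) is plain   # metadata-free dtypes pass through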
+ + """ + if dtype.fields is not None: + found_metadata = dtype.metadata is not None + + names = [] + formats = [] + offsets = [] + titles = [] + for name, field in dtype.fields.items(): + field_dt = drop_metadata(field[0]) + if field_dt is not field[0]: + found_metadata = True + + names.append(name) + formats.append(field_dt) + offsets.append(field[1]) + titles.append(None if len(field) < 3 else field[2]) + + if not found_metadata: + return dtype + + structure = dict( + names=names, formats=formats, offsets=offsets, titles=titles, + itemsize=dtype.itemsize) + + # NOTE: Could pass (dtype.type, structure) to preserve record dtypes... + return np.dtype(structure, align=dtype.isalignedstruct) + elif dtype.subdtype is not None: + # subarray dtype + subdtype, shape = dtype.subdtype + new_subdtype = drop_metadata(subdtype) + if dtype.metadata is None and new_subdtype is subdtype: + return dtype + + return np.dtype((new_subdtype, shape)) + else: + # Normal unstructured dtype + if dtype.metadata is None: + return dtype + # Note that `dt.str` doesn't round-trip e.g. for user-dtypes. + return np.dtype(dtype.str) diff --git a/phivenv/Lib/site-packages/numpy/lib/_utils_impl.pyi b/phivenv/Lib/site-packages/numpy/lib/_utils_impl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ec2e4d40044ae90db500514f67cd608edadc79f0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_utils_impl.pyi @@ -0,0 +1,33 @@ +from typing import ( + Any, + TypeVar, + Protocol, +) + +from numpy._core.numerictypes import ( + issubdtype as issubdtype, +) + +_T_contra = TypeVar("_T_contra", contravariant=True) + +# A file-like object opened in `w` mode +class _SupportsWrite(Protocol[_T_contra]): + def write(self, s: _T_contra, /) -> Any: ... + +__all__: list[str] + +def get_include() -> str: ... + +def info( + object: object = ..., + maxwidth: int = ..., + output: None | _SupportsWrite[str] = ..., + toplevel: str = ..., +) -> None: ... + +def source( + object: object, + output: None | _SupportsWrite[str] = ..., +) -> None: ... + +def show_runtime() -> None: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/_version.py b/phivenv/Lib/site-packages/numpy/lib/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..5c837dd1958047532461818ba54baad1df104ba6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_version.py @@ -0,0 +1,155 @@ +"""Utility to compare (NumPy) version strings. + +The NumpyVersion class allows properly comparing numpy version strings. +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. + +""" +import re + + +__all__ = ['NumpyVersion'] + + +class NumpyVersion(): + """Parse and compare numpy version strings. + + NumPy has the following versioning scheme (numbers given are examples; they + can be > 9 in principle): + + - Released version: '1.8.0', '1.8.1', etc. + - Alpha: '1.8.0a1', '1.8.0a2', etc. + - Beta: '1.8.0b1', '1.8.0b2', etc. + - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. + - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) + - Development versions after a1: '1.8.0a1.dev-f1234afa', + '1.8.0b2.dev-f1234afa', + '1.8.1rc1.dev-f1234afa', etc. + - Development versions (no git hash available): '1.8.0.dev-Unknown' + + Comparing needs to be done against a valid version string or other + `NumpyVersion` instance. Note that all development versions of the same + (pre-)release compare equal. + + .. 
versionadded:: 1.9.0 + + Parameters + ---------- + vstring : str + NumPy version string (``np.__version__``). + + Examples + -------- + >>> from numpy.lib import NumpyVersion + >>> if NumpyVersion(np.__version__) < '1.7.0': + ... print('skip') + >>> # skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ... + ValueError: Not a valid numpy version string + + """ + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (str, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, str): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return "NumpyVersion(%s)" % self.vstring diff --git a/phivenv/Lib/site-packages/numpy/lib/_version.pyi b/phivenv/Lib/site-packages/numpy/lib/_version.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5f2abbac4ab9082bf45bb7d3e21aca3e743c9f41 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/_version.pyi @@ -0,0 +1,17 @@ +__all__: list[str] + +class NumpyVersion: + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion) -> bool: ... + def __le__(self, other: str | NumpyVersion) -> bool: ... 
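+    # Ordering sketch (illustrative, mirroring _version.py): alphas sort before
+    # betas, betas before release candidates, and pre-release or dev builds
+    # before the final release. Strings are parsed with NumpyVersion() first:
+    #     NumpyVersion('1.8.0a1') < '1.8.0b2'           # True
+    #     NumpyVersion('1.8.0rc1') < '1.8.0'            # True
+    #     NumpyVersion('1.8.0.dev-f1234afa') < '1.8.0'  # True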
+ def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion) -> bool: ... + def __ge__(self, other: str | NumpyVersion) -> bool: ... diff --git a/phivenv/Lib/site-packages/numpy/lib/array_utils.py b/phivenv/Lib/site-packages/numpy/lib/array_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7124482a0c9bba6e7d0837680bfb098fce0467ab --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/array_utils.py @@ -0,0 +1,7 @@ +from ._array_utils_impl import ( + __all__, + __doc__, + byte_bounds, + normalize_axis_index, + normalize_axis_tuple, +) diff --git a/phivenv/Lib/site-packages/numpy/lib/array_utils.pyi b/phivenv/Lib/site-packages/numpy/lib/array_utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..231398e0035145a72010d1dc724f931c86f8d047 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/array_utils.pyi @@ -0,0 +1,6 @@ +from ._array_utils_impl import ( + __all__ as __all__, + byte_bounds as byte_bounds, + normalize_axis_index as normalize_axis_index, + normalize_axis_tuple as normalize_axis_tuple, +) diff --git a/phivenv/Lib/site-packages/numpy/lib/format.py b/phivenv/Lib/site-packages/numpy/lib/format.py new file mode 100644 index 0000000000000000000000000000000000000000..aff3a86954198432ddbe7207415745eee4e476f3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/format.py @@ -0,0 +1,1015 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. + +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. A competent developer should be + able to create a solution in their preferred programming language to + read most ``.npy`` files that they have been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmap`. + +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. 
Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. + +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. +Consumers can figure out the number of bytes by multiplying the number +of elements given by the shape (noting that ``shape=()`` means there is +1 element) by ``dtype.itemsize``. 
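+
+A minimal sketch of reading a version 1.0 header by hand, following the layout
+above (illustrative only; ``data.npy`` stands in for any v1.0 file)::
+
+    import ast
+    import struct
+
+    with open('data.npy', 'rb') as f:
+        assert f.read(6) == b'\\x93NUMPY'   # magic string
+        major, minor = f.read(2)            # file format version
+        header_len = struct.unpack('<H', f.read(2))[0]
+        header = ast.literal_eval(f.read(header_len).decode('latin1'))
+    # header now holds the dict with 'descr', 'fortran_order' and 'shape'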
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes. This can be exceeded by structured arrays with a large number of
+columns. The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+import io
+import os
+import pickle
+import warnings
+
+import numpy
+from numpy.lib._utils_impl import drop_metadata
+
+
+__all__ = []
+
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+    (1, 0): ('<H', 'latin1'),
+    (2, 0): ('<I', 'latin1'),
+    (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+
+def _check_version(version):
+    if version not in [(1, 0), (2, 0), (3, 0), None]:
+        msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+        raise ValueError(msg % (version,))
+
+
+def magic(major, minor):
+    """ Return the magic string for the given file format version.
+
+    Parameters
+    ----------
+    major : int in [0, 255]
+    minor : int in [0, 255]
+
+    Returns
+    -------
+    magic : str
+
+    Raises
+    ------
+    ValueError if the version cannot be formatted.
+    """
+    if major < 0 or major > 255:
+        raise ValueError("major version must be 0 <= major < 256")
+    if minor < 0 or minor > 255:
+        raise ValueError("minor version must be 0 <= minor < 256")
+    return MAGIC_PREFIX + bytes([major, minor])
+
+def read_magic(fp):
+    """ Read the magic string to get the version of the file format.
+
+    Parameters
+    ----------
+    fp : filelike object
+
+    Returns
+    -------
+    major : int
+    minor : int
+    """
+    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+    if magic_str[:-2] != MAGIC_PREFIX:
+        msg = "the magic string is not correct; expected %r, got %r"
+        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+    major, minor = magic_str[-2:]
+    return major, minor
+
+
+def dtype_to_descr(dtype):
+    """
+    Get a serializable descriptor from the dtype.
+
+    The .descr attribute of a dtype object cannot be round-tripped through
+    the dtype() constructor. Simple types, like dtype('float32'), have
+    a descr which looks like a record array with one field with '' as
+    a name. The dtype() constructor interprets this as a request to give
+    a default name. Instead, we construct a descriptor that can be passed to
+    dtype().
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype of the array that will be written to disk.
+
+    Returns
+    -------
+    descr : object
+        An object that can be passed to `numpy.dtype()` in order to
+        replicate the input dtype.
+
+    """
+    # NOTE: that drop_metadata may not return the right dtype e.g. for user
+    # dtypes. In that case our code below would fail the same, though.
+    new_dtype = drop_metadata(dtype)
+    if new_dtype is not dtype:
+        warnings.warn("metadata on a dtype is not saved to an npy/npz. "
+                      "Use another format (such as pickle) to store it.",
+                      UserWarning, stacklevel=2)
+    dtype = new_dtype
+
+    if dtype.names is not None:
+        # This is a record array.
The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + elif not type(dtype)._legacy: + # this must be a user-defined dtype since numpy does not yet expose any + # non-legacy dtypes in the public API + # + # non-legacy dtypes don't yet have __array_interface__ + # support. Instead, as a hack, we use pickle to save the array, and lie + # that the dtype is object. When the array is loaded, the descriptor is + # unpickled with the array and the object dtype in the header is + # discarded. + # + # a future NEP should define a way to serialize user-defined + # descriptors and ideally work out the possible security implications + warnings.warn("Custom dtypes are saved as python objects using the " + "pickle protocol. Loading this file requires " + "allow_pickle=True to be set.", + UserWarning, stacklevel=2) + return "|O" + else: + return dtype.str + +def descr_to_dtype(descr): + """ + Returns a dtype based off the given description. + + This is essentially the reverse of `~lib.format.dtype_to_descr`. It will + remove the valueless padding fields created by, i.e. simple fields like + dtype('float32'), and then convert the description to its corresponding + dtype. + + Parameters + ---------- + descr : object + The object retrieved by dtype.descr. Can be passed to + `numpy.dtype` in order to replicate the input dtype. + + Returns + ------- + dtype : dtype + The dtype constructed by the description. + + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. We will have to make it C-contiguous + # before writing. Note that we need to test for C_CONTIGUOUS first + # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. 
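+        # (Note: write_array serializes this non-contiguous case in C order --
+        # array.tofile() for real file objects, an order='C' nditer otherwise --
+        # so recording fortran_order=False here stays consistent.)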
+ d['fortran_order'] = False + + d['descr'] = dtype_to_descr(array.dtype) + return d + + +def _wrap_header(header, version): + """ + Takes a stringified header, and attaches the prefix and padding to it + """ + import struct + assert version is not None + fmt, encoding = _header_size_info[version] + header = header.encode(encoding) + hlen = len(header) + 1 + padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) + try: + header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) + except struct.error: + msg = "Header length {} too big for version={}".format(hlen, version) + raise ValueError(msg) from None + + # Pad the header with spaces and a final newline such that the magic + # string, the header-length short and the header are aligned on a + # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes + # aligned up to ARRAY_ALIGN on systems like Linux where mmap() + # offset must be page-aligned (i.e. the beginning of the file). + return header_prefix + header + b' '*padlen + b'\n' + + +def _wrap_header_guess_version(header): + """ + Like `_wrap_header`, but chooses an appropriate version given the contents + """ + try: + return _wrap_header(header, (1, 0)) + except ValueError: + pass + + try: + ret = _wrap_header(header, (2, 0)) + except UnicodeEncodeError: + pass + else: + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning, stacklevel=2) + return ret + + header = _wrap_header(header, (3, 0)) + warnings.warn("Stored array in format 3.0. It can only be " + "read by NumPy >= 1.17", UserWarning, stacklevel=2) + return header + + +def _write_array_header(fp, d, version=None): + """ Write the header for an array and returns the version used + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + version : tuple or None + None means use oldest that works. Providing an explicit version will + raise a ValueError if the format does not allow saving this data. + Default: None + """ + header = ["{"] + for key, value in sorted(d.items()): + # Need to use repr here, since we eval these when reading + header.append("'%s': %s, " % (key, repr(value))) + header.append("}") + header = "".join(header) + + # Add some spare space so that the array header can be modified in-place + # when changing the array size, e.g. when growing it by appending data at + # the end. + shape = d['shape'] + header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( + shape[-1 if d['fortran_order'] else 0] + ))) if len(shape) > 0 else 0) + + if version is None: + header = _wrap_header_guess_version(header) + else: + header = _wrap_header(header, version) + fp.write(header) + +def write_array_header_1_0(fp, d): + """ Write the header for an array using the 1.0 format. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (1, 0)) + + +def write_array_header_2_0(fp, d): + """ Write the header for an array using the 2.0 format. + The 2.0 format allows storing very large structured arrays. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. 
+ """ + _write_array_header(fp, d, (2, 0)) + +def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 1.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(1, 0), max_header_size=max_header_size) + +def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 2.0 file format + version. + + This will leave the file object located just after the header. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(2, 0), max_header_size=max_header_size) + + +def _filter_header(s): + """Clean up 'L' in npz header ints. + + Cleans up the 'L' in strings representing integers. Needed to allow npz + headers produced in Python2 to be read in Python3. + + Parameters + ---------- + s : string + Npy file header. + + Returns + ------- + header : str + Cleaned up header. + + """ + import tokenize + from io import StringIO + + tokens = [] + last_token_was_number = False + for token in tokenize.generate_tokens(StringIO(s).readline): + token_type = token[0] + token_string = token[1] + if (last_token_was_number and + token_type == tokenize.NAME and + token_string == "L"): + continue + else: + tokens.append(token) + last_token_was_number = (token_type == tokenize.NUMBER) + return tokenize.untokenize(tokens) + + +def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): + """ + see read_array_header_1_0 + """ + # Read an unsigned, little-endian short int which has the length of the + # header. 
+ import ast + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError("Invalid version {!r}".format(version)) + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + if len(header) > max_header_size: + raise ValueError( + f"Header info length ({len(header)}) is large and may not be safe " + "to load securely.\n" + "To allow loading, adjust `max_header_size` or fully trust " + "the `.npy` file using `allow_pickle=True`.\n" + "For safety against large resource use or crashes, sandboxing " + "may be necessary.") + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + # + # For performance reasons, we try without _filter_header first though + try: + d = ast.literal_eval(header) + except SyntaxError as e: + if version <= (2, 0): + header = _filter_header(header) + try: + d = ast.literal_eval(header) + except SyntaxError as e2: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e2 + else: + warnings.warn( + "Reading `.npy` or `.npz` file required additional " + "header parsing as it was created on Python 2. Save the " + "file again to speed up loading and avoid this warning.", + UserWarning, stacklevel=4) + else: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) + + # Sanity-check the values. + if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: True + pickle_kwargs : dict, optional + Additional keyword arguments to pass to pickle.dump, excluding + 'protocol'. 
These are only useful when pickling objects in object + arrays on Python 3 to Python 2 compatible format. + + Raises + ------ + ValueError + If the array cannot be persisted. This includes the case of + allow_pickle=False and array being an object array. + Various other errors + If the array contains Python objects as part of its dtype, the + process of pickling them may raise various errors if the objects + are not picklable. + + """ + _check_version(version) + _write_array_header(fp, header_data_from_array_1_0(array), version) + + if array.itemsize == 0: + buffersize = 0 + else: + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + + dtype_class = type(array.dtype) + + if array.dtype.hasobject or not dtype_class._legacy: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out + if not allow_pickle: + if array.dtype.hasobject: + raise ValueError("Object arrays cannot be saved when " + "allow_pickle=False") + if not dtype_class._legacy: + raise ValueError("User-defined dtypes cannot be saved " + "when allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + pickle.dump(array, fp, protocol=4, **pickle_kwargs) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + else: + if isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Read an array from an NPY file. + + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + pickle_kwargs : dict + Additional keyword arguments to pass to pickle.load. These are only + useful when loading object arrays saved on Python 2 when using + Python 3. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid, or allow_pickle=False and the file contains + an object array. + + """ + if allow_pickle: + # Effectively ignore max_header_size, since `allow_pickle` indicates + # that the input is fully trusted. + max_header_size = 2**64 + + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape, dtype=numpy.int64) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. 
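+        # (Note: np.load forwards its encoding option here through
+        # pickle_kwargs, e.g. dict(encoding='latin1') for files written
+        # on Python 2.)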
+ if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) from err + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + OSError + If the file is not found or cannot be opened correctly. 
+
+    See Also
+    --------
+    numpy.memmap
+
+    """
+    if isfileobj(filename):
+        raise ValueError("Filename must be a string or a path-like object."
+                         " Memmap cannot use existing file handles.")
+
+    if 'w' in mode:
+        # We are creating the file, not reading it.
+        # Check if we ought to create the file.
+        _check_version(version)
+        # Ensure that the given dtype is an authentic dtype object rather
+        # than just something that can be interpreted as a dtype object.
+        dtype = numpy.dtype(dtype)
+        if dtype.hasobject:
+            msg = "Array can't be memory-mapped: Python objects in dtype."
+            raise ValueError(msg)
+        d = dict(
+            descr=dtype_to_descr(dtype),
+            fortran_order=fortran_order,
+            shape=shape,
+        )
+        # If we got here, then it should be safe to create the file.
+        with open(os.fspath(filename), mode+'b') as fp:
+            _write_array_header(fp, d, version)
+            offset = fp.tell()
+    else:
+        # Read the header of the file first.
+        with open(os.fspath(filename), 'rb') as fp:
+            version = read_magic(fp)
+            _check_version(version)
+
+            shape, fortran_order, dtype = _read_array_header(
+                    fp, version, max_header_size=max_header_size)
+            if dtype.hasobject:
+                msg = "Array can't be memory-mapped: Python objects in dtype."
+                raise ValueError(msg)
+            offset = fp.tell()
+
+    if fortran_order:
+        order = 'F'
+    else:
+        order = 'C'
+
+    # We need to change a write-only mode to a read-write mode since we've
+    # already written data to the file.
+    if mode == 'w+':
+        mode = 'r+'
+
+    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+                          mode=mode, offset=offset)
+
+    return marray
+
+
+def _read_bytes(fp, size, error_template="ran out of data"):
+    """
+    Read from file-like object until size bytes are read.
+    Raises ValueError if EOF is encountered before size bytes are read.
+    Non-blocking objects only supported if they derive from io objects.
+
+    Required because e.g. ZipExtFile in Python 2.6 could return less data
+    than requested.
+    """
+    data = bytes()
+    while True:
+        # io files (default in python3) return None or raise on
+        # would-block, python2 file will truncate, probably nothing can be
+        # done about that.  note that regular files can't be non-blocking
+        try:
+            r = fp.read(size - len(data))
+            data += r
+            if len(r) == 0 or len(data) == size:
+                break
+        except BlockingIOError:
+            pass
+    if len(data) != size:
+        msg = "EOF: reading %s, expected %d bytes got %d"
+        raise ValueError(msg % (error_template, size, len(data)))
+    else:
+        return data
+
+
+def isfileobj(f):
+    if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+        return False
+    try:
+        # BufferedReader/Writer may raise OSError when
+        # fetching `fileno()` (e.g. when wrapping BytesIO).
+        f.fileno()
+        return True
+    except OSError:
+        return False
diff --git a/phivenv/Lib/site-packages/numpy/lib/format.pyi b/phivenv/Lib/site-packages/numpy/lib/format.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..b45b8b572d05592e9adbb6d59730b9680c6a2f72
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/format.pyi
@@ -0,0 +1,22 @@
+from typing import Any, Literal, Final
+
+__all__: list[str]
+
+EXPECTED_KEYS: Final[set[str]]
+MAGIC_PREFIX: Final[bytes]
+MAGIC_LEN: Literal[8]
+ARRAY_ALIGN: Literal[64]
+BUFFER_SIZE: Literal[262144]  # 2**18
+
+def magic(major, minor): ...
+def read_magic(fp): ...
+def dtype_to_descr(dtype): ...
+def descr_to_dtype(descr): ...
+def header_data_from_array_1_0(array): ...
+def write_array_header_1_0(fp, d): ...
+def write_array_header_2_0(fp, d): ...
+def read_array_header_1_0(fp): ...
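+# NOTE: the implementations of ``read_array`` and ``open_memmap`` below also
+# accept a keyword-only ``max_header_size`` argument, which these stubs omit.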
+def read_array_header_2_0(fp): ... +def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... +def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... +def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... diff --git a/phivenv/Lib/site-packages/numpy/lib/introspect.py b/phivenv/Lib/site-packages/numpy/lib/introspect.py new file mode 100644 index 0000000000000000000000000000000000000000..bc9bd5d295f63abe8f3b807f6948bf6ed2f9cbad --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/introspect.py @@ -0,0 +1,95 @@ +""" +Introspection helper functions. +""" +import re + +__all__ = ['opt_func_info'] + + +def opt_func_info(func_name=None, signature=None): + """ + Returns a dictionary containing the currently supported CPU dispatched + features for all optimized functions. + + Parameters + ---------- + func_name : str (optional) + Regular expression to filter by function name. + + signature : str (optional) + Regular expression to filter by data type. + + Returns + ------- + dict + A dictionary where keys are optimized function names and values are + nested dictionaries indicating supported targets based on data types. + + Examples + -------- + Retrieve dispatch information for functions named 'add' or 'sub' and + data types 'float64' or 'float32': + + >>> dict = np.lib.introspect.opt_func_info( + ... func_name="add|abs", signature="float64|complex64" + ... ) + >>> import json + >>> print(json.dumps(dict, indent=2)) + { + "absolute": { + "dd": { + "current": "SSE41", + "available": "SSE41 baseline(SSE SSE2 SSE3)" + }, + "Ff": { + "current": "FMA3__AVX2", + "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" + }, + "Dd": { + "current": "FMA3__AVX2", + "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" + } + }, + "add": { + "ddd": { + "current": "FMA3__AVX2", + "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" + }, + "FFF": { + "current": "FMA3__AVX2", + "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" + } + } + } + + """ + from numpy._core._multiarray_umath import ( + __cpu_targets_info__ as targets, dtype + ) + + if func_name is not None: + func_pattern = re.compile(func_name) + matching_funcs = { + k: v for k, v in targets.items() + if func_pattern.search(k) + } + else: + matching_funcs = targets + + if signature is not None: + sig_pattern = re.compile(signature) + matching_sigs = {} + for k, v in matching_funcs.items(): + matching_chars = {} + for chars, targets in v.items(): + if any([ + sig_pattern.search(c) or + sig_pattern.search(dtype(c).name) + for c in chars + ]): + matching_chars[chars] = targets + if matching_chars: + matching_sigs[k] = matching_chars + else: + matching_sigs = matching_funcs + return matching_sigs diff --git a/phivenv/Lib/site-packages/numpy/lib/mixins.py b/phivenv/Lib/site-packages/numpy/lib/mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2cc31b04892e0e91fffd5b9ed94bb7f95b1c42 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/mixins.py @@ -0,0 +1,183 @@ +""" +Mixin classes for custom array types that don't inherit from ndarray. 
+""" +from numpy._core import umath as um + + +__all__ = ['NDArrayOperatorsMixin'] + + +def _disables_array_ufunc(obj): + """True when __array_ufunc__ is set to None.""" + try: + return obj.__array_ufunc__ is None + except AttributeError: + return False + + +def _binary_method(ufunc, name): + """Implement a forward binary method with a ufunc, e.g., __add__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(self, other) + func.__name__ = '__{}__'.format(name) + return func + + +def _reflected_binary_method(ufunc, name): + """Implement a reflected binary method with a ufunc, e.g., __radd__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(other, self) + func.__name__ = '__r{}__'.format(name) + return func + + +def _inplace_binary_method(ufunc, name): + """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" + def func(self, other): + return ufunc(self, other, out=(self,)) + func.__name__ = '__i{}__'.format(name) + return func + + +def _numeric_methods(ufunc, name): + """Implement forward, reflected and inplace binary methods with a ufunc.""" + return (_binary_method(ufunc, name), + _reflected_binary_method(ufunc, name), + _inplace_binary_method(ufunc, name)) + + +def _unary_method(ufunc, name): + """Implement a unary special method with a ufunc.""" + def func(self): + return ufunc(self) + func.__name__ = '__{}__'.format(name) + return func + + +class NDArrayOperatorsMixin: + """Mixin defining all operator special methods using __array_ufunc__. + + This class implements the special methods for almost all of Python's + builtin operators defined in the `operator` module, including comparisons + (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by + deferring to the ``__array_ufunc__`` method, which subclasses must + implement. + + It is useful for writing classes that do not inherit from `numpy.ndarray`, + but that should support arithmetic and numpy universal functions like + arrays as described in `A Mechanism for Overriding Ufuncs + `_. + + As an trivial example, consider this implementation of an ``ArrayLike`` + class that simply wraps a NumPy array and ensures that the result of any + arithmetic operation is also an ``ArrayLike`` object: + + >>> import numbers + >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + ... def __init__(self, value): + ... self.value = np.asarray(value) + ... + ... # One might also consider adding the built-in list type to this + ... # list, to support operations like np.add(array_like, list) + ... _HANDLED_TYPES = (np.ndarray, numbers.Number) + ... + ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + ... out = kwargs.get('out', ()) + ... for x in inputs + out: + ... # Only support operations with instances of + ... # _HANDLED_TYPES. Use ArrayLike instead of type(self) + ... # for isinstance to allow subclasses that don't + ... # override __array_ufunc__ to handle ArrayLike objects. + ... if not isinstance( + ... x, self._HANDLED_TYPES + (ArrayLike,) + ... ): + ... return NotImplemented + ... + ... # Defer to the implementation of the ufunc + ... # on unwrapped values. + ... inputs = tuple(x.value if isinstance(x, ArrayLike) else x + ... for x in inputs) + ... if out: + ... kwargs['out'] = tuple( + ... x.value if isinstance(x, ArrayLike) else x + ... for x in out) + ... result = getattr(ufunc, method)(*inputs, **kwargs) + ... + ... if type(result) is tuple: + ... # multiple return values + ... 
return tuple(type(self)(x) for x in result) + ... elif method == 'at': + ... # no return value + ... return None + ... else: + ... # one return value + ... return type(self)(result) + ... + ... def __repr__(self): + ... return '%s(%r)' % (type(self).__name__, self.value) + + In interactions between ``ArrayLike`` objects and numbers or numpy arrays, + the result is always another ``ArrayLike``: + + >>> x = ArrayLike([1, 2, 3]) + >>> x - 1 + ArrayLike(array([0, 1, 2])) + >>> 1 - x + ArrayLike(array([ 0, -1, -2])) + >>> np.arange(3) - x + ArrayLike(array([-1, -1, -1])) + >>> x - np.arange(3) + ArrayLike(array([1, 1, 1])) + + Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations + with arbitrary, unrecognized types. This ensures that interactions with + ArrayLike preserve a well-defined casting hierarchy. + + .. versionadded:: 1.13 + """ + __slots__ = () + # Like np.ndarray, this mixin class implements "Option 1" from the ufunc + # overrides NEP. + + # comparisons don't have reflected and in-place versions + __lt__ = _binary_method(um.less, 'lt') + __le__ = _binary_method(um.less_equal, 'le') + __eq__ = _binary_method(um.equal, 'eq') + __ne__ = _binary_method(um.not_equal, 'ne') + __gt__ = _binary_method(um.greater, 'gt') + __ge__ = _binary_method(um.greater_equal, 'ge') + + # numeric methods + __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') + __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') + __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') + __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( + um.matmul, 'matmul') + # Python 3 does not use __div__, __rdiv__, or __idiv__ + __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( + um.true_divide, 'truediv') + __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( + um.floor_divide, 'floordiv') + __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') + __divmod__ = _binary_method(um.divmod, 'divmod') + __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') + # __idivmod__ does not exist + # TODO: handle the optional third argument for __pow__? + __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/phivenv/Lib/site-packages/numpy/lib/mixins.pyi b/phivenv/Lib/site-packages/numpy/lib/mixins.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b80dd0c2da9de6f8b8d5f9e3bf01331a035bcd51 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/mixins.pyi @@ -0,0 +1,74 @@ +from abc import ABCMeta, abstractmethod +from typing import Literal as L, Any + +from numpy import ufunc + +__all__: list[str] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's reliant on subclasses implementing `__array_ufunc__` + +# NOTE: The accepted input- and output-types of the various dunders are +# completely dependent on how `__array_ufunc__` is implemented. 
+# As such, only little type safety can be provided here. + +class NDArrayOperatorsMixin(metaclass=ABCMeta): + @abstractmethod + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + def __lt__(self, other: Any) -> Any: ... + def __le__(self, other: Any) -> Any: ... + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def __gt__(self, other: Any) -> Any: ... + def __ge__(self, other: Any) -> Any: ... + def __add__(self, other: Any) -> Any: ... + def __radd__(self, other: Any) -> Any: ... + def __iadd__(self, other: Any) -> Any: ... + def __sub__(self, other: Any) -> Any: ... + def __rsub__(self, other: Any) -> Any: ... + def __isub__(self, other: Any) -> Any: ... + def __mul__(self, other: Any) -> Any: ... + def __rmul__(self, other: Any) -> Any: ... + def __imul__(self, other: Any) -> Any: ... + def __matmul__(self, other: Any) -> Any: ... + def __rmatmul__(self, other: Any) -> Any: ... + def __imatmul__(self, other: Any) -> Any: ... + def __truediv__(self, other: Any) -> Any: ... + def __rtruediv__(self, other: Any) -> Any: ... + def __itruediv__(self, other: Any) -> Any: ... + def __floordiv__(self, other: Any) -> Any: ... + def __rfloordiv__(self, other: Any) -> Any: ... + def __ifloordiv__(self, other: Any) -> Any: ... + def __mod__(self, other: Any) -> Any: ... + def __rmod__(self, other: Any) -> Any: ... + def __imod__(self, other: Any) -> Any: ... + def __divmod__(self, other: Any) -> Any: ... + def __rdivmod__(self, other: Any) -> Any: ... + def __pow__(self, other: Any) -> Any: ... + def __rpow__(self, other: Any) -> Any: ... + def __ipow__(self, other: Any) -> Any: ... + def __lshift__(self, other: Any) -> Any: ... + def __rlshift__(self, other: Any) -> Any: ... + def __ilshift__(self, other: Any) -> Any: ... + def __rshift__(self, other: Any) -> Any: ... + def __rrshift__(self, other: Any) -> Any: ... + def __irshift__(self, other: Any) -> Any: ... + def __and__(self, other: Any) -> Any: ... + def __rand__(self, other: Any) -> Any: ... + def __iand__(self, other: Any) -> Any: ... + def __xor__(self, other: Any) -> Any: ... + def __rxor__(self, other: Any) -> Any: ... + def __ixor__(self, other: Any) -> Any: ... + def __or__(self, other: Any) -> Any: ... + def __ror__(self, other: Any) -> Any: ... + def __ior__(self, other: Any) -> Any: ... + def __neg__(self) -> Any: ... + def __pos__(self) -> Any: ... + def __abs__(self) -> Any: ... + def __invert__(self) -> Any: ... 
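# A minimal sketch of the pattern NDArrayOperatorsMixin enables: the class
# below implements only ``__array_ufunc__`` and inherits ``+``, ``<`` and the
# other operators from the mixin.  ``Wrapped`` is an illustrative name, not
# part of the package.
import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap Wrapped operands, apply the ufunc, re-wrap the result.
        inputs = tuple(x.value if isinstance(x, Wrapped) else x
                       for x in inputs)
        return Wrapped(getattr(ufunc, method)(*inputs, **kwargs))

w = Wrapped([1, 2, 3])
print((w + 1).value)  # [2 3 4]   (__add__ is provided by the mixin)
print((w < 2).value)  # [ True False False]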
diff --git a/phivenv/Lib/site-packages/numpy/lib/npyio.py b/phivenv/Lib/site-packages/numpy/lib/npyio.py
new file mode 100644
index 0000000000000000000000000000000000000000..2405df33d47ea190cb9903de8c534253c161b01b
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/npyio.py
@@ -0,0 +1,3 @@
+from ._npyio_impl import (
+    __doc__, DataSource, NpzFile
+)
diff --git a/phivenv/Lib/site-packages/numpy/lib/npyio.pyi b/phivenv/Lib/site-packages/numpy/lib/npyio.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..bfb3129823bd757bddde8b97363f2ce16cdbcdc5
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/npyio.pyi
@@ -0,0 +1,4 @@
+from numpy.lib._npyio_impl import (
+    DataSource as DataSource,
+    NpzFile as NpzFile,
+)
diff --git a/phivenv/Lib/site-packages/numpy/lib/recfunctions.py b/phivenv/Lib/site-packages/numpy/lib/recfunctions.py
new file mode 100644
index 0000000000000000000000000000000000000000..a42661b079e81af245460f3edfc24bfdb5ae4c16
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/lib/recfunctions.py
@@ -0,0 +1,1674 @@
+"""
+Collection of utilities to manipulate structured arrays.
+
+Most of these functions were initially implemented by John Hunter for
+matplotlib.  They have been rewritten and extended for convenience.
+
+"""
+import itertools
+import numpy as np
+import numpy.ma as ma
+from numpy import ndarray
+from numpy.ma import MaskedArray
+from numpy.ma.mrecords import MaskedRecords
+from numpy._core.overrides import array_function_dispatch
+from numpy._core.records import recarray
+from numpy.lib._iotools import _is_string_like
+
+_check_fill_value = np.ma.core._check_fill_value
+
+
+__all__ = [
+    'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+    'drop_fields', 'find_duplicates', 'flatten_descr',
+    'get_fieldstructure', 'get_names', 'get_names_flat',
+    'join_by', 'merge_arrays', 'rec_append_fields',
+    'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+    'rename_fields', 'repack_fields', 'require_fields',
+    'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
+    ]
+
+
+def _recursive_fill_fields_dispatcher(input, output):
+    return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
+def recursive_fill_fields(input, output):
+    """
+    Fills the fields of `output` with the corresponding fields of `input`,
+    with support for nested structures.
+
+    Parameters
+    ----------
+    input : ndarray
+        Input array.
+    output : ndarray
+        Output array.
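+
+    Returns
+    -------
+    output : ndarray
+        The `output` array, updated with the fields of `input`.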
+ + Notes + ----- + * `output` should be at least the same size as `input` + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) + >>> b = np.zeros((3,), dtype=a.dtype) + >>> rfn.recursive_fill_fields(a, b) + array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) + >>> dt.descr + [(('a', 'A'), '>> _get_fieldspec(dt) + [(('a', 'A'), dtype('int64')), ('b', dtype(('>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) + ('A',) + >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype) + ('A', 'B') + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names(adtype) + ('a', ('b', ('ba', 'bb'))) + """ + listnames = [] + names = adtype.names + for name in names: + current = adtype[name] + if current.names is not None: + listnames.append((name, tuple(get_names(current)))) + else: + listnames.append(name) + return tuple(listnames) + + +def get_names_flat(adtype): + """ + Returns the field names of the input datatype as a tuple. Input datatype + must have fields otherwise error is raised. + Nested structure are flattened beforehand. + + Parameters + ---------- + adtype : dtype + Input datatype + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None + False + >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) + ('A', 'B') + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names_flat(adtype) + ('a', 'b', 'ba', 'bb') + """ + listnames = [] + names = adtype.names + for name in names: + listnames.append(name) + current = adtype[name] + if current.names is not None: + listnames.extend(get_names_flat(current)) + return tuple(listnames) + + +def flatten_descr(ndtype): + """ + Flatten a structured data-type description. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) + (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) + + """ + names = ndtype.names + if names is None: + return (('', ndtype),) + else: + descr = [] + for field in names: + (typ, _) = ndtype.fields[field] + if typ.names is not None: + descr.extend(flatten_descr(typ)) + else: + descr.append((field, typ)) + return tuple(descr) + + +def _zip_dtype(seqarrays, flatten=False): + newdtype = [] + if flatten: + for a in seqarrays: + newdtype.extend(flatten_descr(a.dtype)) + else: + for a in seqarrays: + current = a.dtype + if current.names is not None and len(current.names) == 1: + # special case - dtypes of 1 field are flattened + newdtype.extend(_get_fieldspec(current)) + else: + newdtype.append(('', current)) + return np.dtype(newdtype) + + +def _zip_descr(seqarrays, flatten=False): + """ + Combine the dtype description of a series of arrays. + + Parameters + ---------- + seqarrays : sequence of arrays + Sequence of arrays + flatten : {boolean}, optional + Whether to collapse nested descriptions. + """ + return _zip_dtype(seqarrays, flatten=flatten).descr + + +def get_fieldstructure(adtype, lastname=None, parents=None,): + """ + Returns a dictionary with fields indexing lists of their parent fields. + + This function is used to simplify access to fields nested in other fields. 
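+
+    Each key of the returned dictionary is a field name, and its value is the
+    list of that field's enclosing parents, outermost first; top-level fields
+    map to an empty list (see the example below).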
+
+    Parameters
+    ----------
+    adtype : np.dtype
+        Input datatype
+    lastname : optional
+        Last processed field name (used internally during recursion).
+    parents : dictionary
+        Dictionary of parent fields (used internally during recursion).
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('A', int),
+    ...                    ('B', [('BA', int),
+    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
+    >>> rfn.get_fieldstructure(ndtype)
+    ... # XXX: possible regression, order of BBA and BBB is swapped
+    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+    """
+    if parents is None:
+        parents = {}
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            if lastname:
+                parents[name] = [lastname, ]
+            else:
+                parents[name] = []
+            parents.update(get_fieldstructure(current, name, parents))
+        else:
+            lastparent = [_ for _ in (parents.get(lastname, []) or [])]
+            if lastparent:
+                lastparent.append(lastname)
+            elif lastname:
+                lastparent = [lastname, ]
+            parents[name] = lastparent or []
+    return parents
+
+
+def _izip_fields_flat(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays,
+    collapsing any nested structure.
+
+    """
+    for element in iterable:
+        if isinstance(element, np.void):
+            yield from _izip_fields_flat(tuple(element))
+        else:
+            yield element
+
+
+def _izip_fields(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays.
+
+    """
+    for element in iterable:
+        if (hasattr(element, '__iter__') and
+                not isinstance(element, str)):
+            yield from _izip_fields(element)
+        elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            # a one-field void gets the same recursive treatment as the
+            # iterable branch above
+            yield from _izip_fields(element)
+        else:
+            yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+    """
+    Returns an iterator of concatenated items from a sequence of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays.
+    fill_value : {None, integer}
+        Value used to pad shorter iterables.
+    flatten : {True, False}, optional
+        Whether to flatten the fields of each item (collapse any nested
+        structure).
+    """
+
+    # Should we flatten the items, or just use a nested approach
+    if flatten:
+        zipfunc = _izip_fields_flat
+    else:
+        zipfunc = _izip_fields
+
+    for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
+        yield tuple(zipfunc(tup))
+
+
+def _fix_output(output, usemask=True, asrecarray=False):
+    """
+    Private function: return a recarray, an ndarray, a MaskedArray
+    or a MaskedRecords depending on the input parameters.
+    """
+    if not isinstance(output, MaskedArray):
+        usemask = False
+    if usemask:
+        if asrecarray:
+            output = output.view(MaskedRecords)
+    else:
+        output = ma.filled(output)
+        if asrecarray:
+            output = output.view(recarray)
+    return output
+
+
+def _fix_defaults(output, defaults=None):
+    """
+    Update the fill_value and masked data of `output`
+    from the defaults given in the dictionary `defaults`.
+    """
+    names = output.dtype.names
+    (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
+    for (k, v) in (defaults or {}).items():
+        if k in names:
+            fill_value[k] = v
+            data[k][mask[k]] = v
+    return output
+
+
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+                             usemask=None, asrecarray=None):
+    return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+                 usemask=False, asrecarray=False):
+    """
+    Merge arrays field by field.
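+
+    Arrays of different lengths are padded out with `fill_value` so that the
+    result has the length of the longest input (see the examples below).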
+ + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) + >>> rfn.drop_fields(a, 'a') + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + yield from data + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def _repack_fields_dispatcher(a, align=None, recurse=None): + return (a,) + + +@array_function_dispatch(_repack_fields_dispatcher) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. 
This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to + `numpy.dtype`. + + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. + + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... + >>> dt = np.dtype('u1, >> dt + dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '>> print_offsets(dt) + offsets: [0, 8, 16] + itemsize: 24 + >>> packed_dt = rfn.repack_fields(dt) + >>> packed_dt + dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt) + offsets: [0, 1, 9] + itemsize: 17 + + """ + if not isinstance(a, np.dtype): + dt = repack_fields(a.dtype, align=align, recurse=recurse) + return a.astype(dt, copy=False) + + if a.names is None: + return a + + fieldinfo = [] + for name in a.names: + tup = a.fields[name] + if recurse: + fmt = repack_fields(tup[0], align=align, recurse=True) + else: + fmt = tup[0] + + if len(tup) == 3: + name = (tup[2], name) + + fieldinfo.append((name, fmt)) + + dt = np.dtype(fieldinfo, align=align) + return np.dtype((a.type, dt)) + +def _get_fields_and_offsets(dt, offset=0): + """ + Returns a flat list of (dtype, count, offset) tuples of all the + scalar fields in the dtype "dt", including nested fields, in left + to right order. + """ + + # counts up elements in subarrays, including nested subarrays, and returns + # base dtype and count + def count_elem(dt): + count = 1 + while dt.shape != (): + for size in dt.shape: + count *= size + dt = dt.base + return dt, count + + fields = [] + for name in dt.names: + field = dt.fields[name] + f_dt, f_offset = field[0], field[1] + f_dt, n = count_elem(f_dt) + + if f_dt.names is None: + fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) + else: + subfields = _get_fields_and_offsets(f_dt, f_offset + offset) + size = f_dt.itemsize + + for i in range(n): + if i == 0: + # optimization: avoid list comprehension if no subarray + fields.extend(subfields) + else: + fields.extend([(d, c, o + i*size) for d, c, o in subfields]) + return fields + +def _common_stride(offsets, counts, itemsize): + """ + Returns the stride between the fields, or None if the stride is not + constant. The values in "counts" designate the lengths of + subarrays. Subarrays are treated as many contiguous fields, with + always positive stride. 
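+    For example, offsets ``[0, 8, 16]`` with an itemsize of 8 (and no
+    subarrays) give a stride of 8; offsets ``[0, 12]`` give 12, since padding
+    between fields is allowed; descending offsets give a negative stride; and
+    irregular spacing gives None.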
+ """ + if len(offsets) <= 1: + return itemsize + + negative = offsets[1] < offsets[0] # negative stride + if negative: + # reverse, so offsets will be ascending + it = zip(reversed(offsets), reversed(counts)) + else: + it = zip(offsets, counts) + + prev_offset = None + stride = None + for offset, count in it: + if count != 1: # subarray: always c-contiguous + if negative: + return None # subarrays can never have a negative stride + if stride is None: + stride = itemsize + if stride != itemsize: + return None + end_offset = offset + (count - 1) * itemsize + else: + end_offset = offset + + if prev_offset is not None: + new_stride = offset - prev_offset + if stride is None: + stride = new_stride + if stride != new_stride: + return None + + prev_offset = end_offset + + if negative: + return -stride + return stride + + +def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, + casting=None): + return (arr,) + +@array_function_dispatch(_structured_to_unstructured_dispatcher) +def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): + """ + Converts an n-D structured array into an (n+1)-D unstructured array. + + The new array will have a new last dimension equal in size to the + number of field-elements of the input array. If not supplied, the output + datatype is determined from the numpy type promotion rules applied to all + the field datatypes. + + Nested fields, as well as each element of any subarray fields, all count + as a single field-elements. + + Parameters + ---------- + arr : ndarray + Structured array or dtype to convert. Cannot contain object datatype. + dtype : dtype, optional + The dtype of the output unstructured array. + copy : bool, optional + If true, always return a copy. If false, a view is returned if + possible, such as when the `dtype` and strides of the fields are + suitable and the array subtype is one of `numpy.ndarray`, + `numpy.recarray` or `numpy.memmap`. + + .. versionchanged:: 1.25.0 + A view can now be returned if the fields are separated by a + uniform stride. + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + unstructured : ndarray + Unstructured array with one more dimension. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a + array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), + (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], + dtype=[('a', '>> rfn.structured_to_unstructured(a) + array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + fields = _get_fields_and_offsets(arr.dtype) + n_fields = len(fields) + if n_fields == 0 and dtype is None: + raise ValueError("arr has no fields. 
Unable to guess dtype") + elif n_fields == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("arr with no fields is not supported") + + dts, counts, offsets = zip(*fields) + names = ['f{}'.format(n) for n in range(n_fields)] + + if dtype is None: + out_dtype = np.result_type(*[dt.base for dt in dts]) + else: + out_dtype = np.dtype(dtype) + + # Use a series of views and casts to convert to an unstructured array: + + # first view using flattened fields (doesn't work for object arrays) + # Note: dts may include a shape for subarrays + flattened_fields = np.dtype({'names': names, + 'formats': dts, + 'offsets': offsets, + 'itemsize': arr.dtype.itemsize}) + arr = arr.view(flattened_fields) + + # we only allow a few types to be unstructured by manipulating the + # strides, because we know it won't work with, for example, np.matrix nor + # np.ma.MaskedArray. + can_view = type(arr) in (np.ndarray, np.recarray, np.memmap) + if (not copy) and can_view and all(dt.base == out_dtype for dt in dts): + # all elements have the right dtype already; if they have a common + # stride, we can just return a view + common_stride = _common_stride(offsets, counts, out_dtype.itemsize) + if common_stride is not None: + wrap = arr.__array_wrap__ + + new_shape = arr.shape + (sum(counts), out_dtype.itemsize) + new_strides = arr.strides + (abs(common_stride), 1) + + arr = arr[..., np.newaxis].view(np.uint8) # view as bytes + arr = arr[..., min(offsets):] # remove the leading unused data + arr = np.lib.stride_tricks.as_strided(arr, + new_shape, + new_strides, + subok=True) + + # cast and drop the last dimension again + arr = arr.view(out_dtype)[..., 0] + + if common_stride < 0: + arr = arr[..., ::-1] # reverse, if the stride was negative + if type(arr) is not type(wrap.__self__): + # Some types (e.g. recarray) turn into an ndarray along the + # way, so we have to wrap it again in order to match the + # behavior with copy=True. + arr = wrap(arr) + return arr + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, dt.shape) for dt in dts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally is it safe to view the packed fields as the unstructured type + return arr.view((out_dtype, (sum(counts),))) + + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of fields-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. 
+ align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> rfn.unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> rfn.apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. + + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. 
+ + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. + + Parameters + ---------- + a : ndarray + array to cast + required_dtype : dtype + datatype for output array + + Returns + ------- + out : ndarray + array with the new dtype, with field values copied from the fields in + the input array with the same name + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')]) + array([(1., 1), (1., 1), (1., 1), (1., 1)], + dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')]) + array([(1., 0), (1., 0), (1., 0), (1., 0)], + dtype=[('b', '>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) + >>> test = rfn.stack_arrays((z,zz)) + >>> test + masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), + (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], + mask=[(False, False, True), (False, False, True), + (False, False, False), (False, False, False), + (False, False, False)], + fill_value=(b'N/A', 1e+20, 1e+20), + dtype=[('A', 'S3'), ('B', ' '%s'" % + (cdtype, fdtype)) + # Only one field: use concatenate + if len(newdescr) == 1: + output = ma.concatenate(seqarrays) + else: + # + output = ma.masked_all((np.sum(nrecords),), newdescr) + offset = np.cumsum(np.r_[0, nrecords]) + seen = [] + for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): + names = a.dtype.names + if names is None: + output['f%i' % len(seen)][i:j] = a + else: + for name in n: + output[name][i:j] = a[name] + if name not in seen: + seen.append(name) + # + return _fix_output(_fix_defaults(output, defaults), + usemask=usemask, asrecarray=asrecarray) + + +def _find_duplicates_dispatcher( + a, key=None, ignoremask=None, return_index=None): + return (a,) + + +@array_function_dispatch(_find_duplicates_dispatcher) +def find_duplicates(a, key=None, ignoremask=True, return_index=False): + """ + Find the duplicates in a structured array along a given key + + Parameters + ---------- + a : array-like + Input array + key : {string, None}, optional + Name of the fields along which to check the duplicates. + If None, the search is performed by records + ignoremask : {True, False}, optional + Whether masked data should be discarded or considered as duplicates. + return_index : {False, True}, optional + Whether to return the indices of the duplicated values. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = [('a', int)] + >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], + ... 
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) + (masked_array(data=[(1,), (1,), (2,), (2,)], + mask=[(False,), (False,), (False,), (False,)], + fill_value=(999999,), + dtype=[('a', '= nb1)] - nb1 + (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) + if jointype == 'inner': + (r1spc, r2spc) = (0, 0) + elif jointype == 'outer': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) + (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) + elif jointype == 'leftouter': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) + # Select the entries from each input + (s1, s2) = (r1[idx_1], r2[idx_2]) + # + # Build the new description of the output array ....... + # Start with the key fields + ndtype = _get_fieldspec(r1k.dtype) + + # Add the fields from r1 + for fname, fdtype in _get_fieldspec(r1.dtype): + if fname not in key: + ndtype.append((fname, fdtype)) + + # Add the fields from r2 + for fname, fdtype in _get_fieldspec(r2.dtype): + # Have we seen the current name already ? + # we need to rebuild this list every time + names = list(name for name, dtype in ndtype) + try: + nameidx = names.index(fname) + except ValueError: + #... we haven't: just add the description to the current list + ndtype.append((fname, fdtype)) + else: + # collision + _, cdtype = ndtype[nameidx] + if fname in key: + # The current field is part of the key: take the largest dtype + ndtype[nameidx] = (fname, max(fdtype, cdtype)) + else: + # The current field is not part of the key: add the suffixes, + # and place the new field adjacent to the old one + ndtype[nameidx:nameidx + 1] = [ + (fname + r1postfix, cdtype), + (fname + r2postfix, fdtype) + ] + # Rebuild a dtype from the new fields + ndtype = np.dtype(ndtype) + # Find the largest nb of common fields : + # r1cmn and r2cmn should be equal, but... + cmn = max(r1cmn, r2cmn) + # Construct an empty array + output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) + names = output.dtype.names + for f in r1names: + selected = s1[f] + if f not in names or (f in r2names and not r2postfix and f not in key): + f += r1postfix + current = output[f] + current[:r1cmn] = selected[:r1cmn] + if jointype in ('outer', 'leftouter'): + current[cmn:cmn + r1spc] = selected[r1cmn:] + for f in r2names: + selected = s2[f] + if f not in names or (f in r1names and not r1postfix and f not in key): + f += r2postfix + current = output[f] + current[:r2cmn] = selected[:r2cmn] + if (jointype == 'outer') and r2spc: + current[-r2spc:] = selected[r2cmn:] + # Sort and finalize the output + output.sort(order=key) + kwargs = dict(usemask=usemask, asrecarray=asrecarray) + return _fix_output(_fix_defaults(output, defaults), **kwargs) + + +def _rec_join_dispatcher( + key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, + defaults=None): + return (r1, r2) + + +@array_function_dispatch(_rec_join_dispatcher) +def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None): + """ + Join arrays `r1` and `r2` on keys. + Alternative to join_by, that always returns a np.recarray. 
+ + See Also + -------- + join_by : equivalent function + """ + kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, + defaults=defaults, usemask=False, asrecarray=True) + return join_by(key, r1, r2, **kwargs) diff --git a/phivenv/Lib/site-packages/numpy/lib/scimath.py b/phivenv/Lib/site-packages/numpy/lib/scimath.py new file mode 100644 index 0000000000000000000000000000000000000000..46650cd8e637d38b1297c4b2152786048304da94 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/scimath.py @@ -0,0 +1,4 @@ +from ._scimath_impl import ( + __all__, __doc__, sqrt, log, log2, logn, log10, power, arccos, arcsin, + arctanh +) diff --git a/phivenv/Lib/site-packages/numpy/lib/scimath.pyi b/phivenv/Lib/site-packages/numpy/lib/scimath.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5598311c1c37cda62a8bde60b7d8976eeb7768af --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/scimath.pyi @@ -0,0 +1,12 @@ +from ._scimath_impl import ( + __all__ as __all__, + sqrt as sqrt, + log as log, + log2 as log2, + logn as logn, + log10 as log10, + power as power, + arccos as arccos, + arcsin as arcsin, + arctanh as arctanh, +) diff --git a/phivenv/Lib/site-packages/numpy/lib/stride_tricks.py b/phivenv/Lib/site-packages/numpy/lib/stride_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd428bf0f37feb3d5f6004d86e71deb86487825 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/stride_tricks.py @@ -0,0 +1,3 @@ +from ._stride_tricks_impl import ( + __doc__, as_strided, sliding_window_view +) diff --git a/phivenv/Lib/site-packages/numpy/lib/stride_tricks.pyi b/phivenv/Lib/site-packages/numpy/lib/stride_tricks.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0f83c9b10d4d8c7bbfce0faae6488860921a8ded --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/stride_tricks.pyi @@ -0,0 +1,4 @@ +from numpy.lib._stride_tricks_impl import ( + as_strided as as_strided, + sliding_window_view as sliding_window_view, +) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__init__.py b/phivenv/Lib/site-packages/numpy/lib/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4348e6d79eb8b0b53b2fdb354023c714f6fa2c48 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c60ee1b84d1e87983bd95cb896e8ba2c15dc40b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a83aaa2a200845f74618d2b950ee7d5b2930ffdd Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..646335bc20a5d10ec21e9fbf69617bc476227f5b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b67d09e6d0eda6a83e84446f11ca3e6de1a077f0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87ed06c98932c4d858f4579349a3acdaadec2bde Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e20341bcae7d833baac82d46aeaa64b63e8180bb Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43b1074e98fa92ffe8ce812d8abfc3fc26f28e91 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..329eaf632dd7e7e1352b2972e30ce022ddbf36a4 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a106bf58ce3fe0074eb0b602b4dc0349c75c3b36 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..743491868f5226e6d2fc57e49291f59d008f38c7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..919cc9e5f0fd4efabd30b3b07f0d387cdb87794e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..454b9685db999f45e1369809bb68194ec6ad8b02 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a90ef6d4ec386578e52dc7852c1fc1f60564ce2 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64dc2f3615632bd0c530449672f07180e343c1da Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07b87a5efedd4cb5398c784c1022cd6b41e6937e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8471e9beb5e6a1912059061c892aeac11e34487a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc89ad363218885943b0890d3baed72f3f12a797 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78646bd9413313dd5568d285bfacc2057e90b2f7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e15b170e69226a510045ab6c77aeee48fcc7241 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c087d53020ff86dbef74935aae7b087693a6f0ae Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffcb2559a096978aec0954e57e8416453d58c1e9 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aadf9c0939ffefaf89d79b23dbd979080be98309 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b02201db24029f8ae8525efb0fe11f025771d90 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dd509cf983db17caa75c2e293c3dffd64df4e5b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy b/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..a6e9e23974e674f1f42670a3824ed42254000656 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy b/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..12936c92d8f8a122f2342a7782f56e7506010c40 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz b/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz new file mode 100644 index 0000000000000000000000000000000000000000..68a3b53a1df0574ad05b0e8bc53279e1110db9af Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy b/phivenv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..c9f33b010db65a716475b178aeb1bf9937a46729 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz 
b/phivenv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz new file mode 100644 index 0000000000000000000000000000000000000000..fd7d9d31ca974c5ae05b1ba397253226373cde86 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/python3.npy b/phivenv/Lib/site-packages/numpy/lib/tests/data/python3.npy new file mode 100644 index 0000000000000000000000000000000000000000..7c6997dd69eef019d6745dee125e92434b7954e3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/python3.npy differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/data/win64python2.npy b/phivenv/Lib/site-packages/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 0000000000000000000000000000000000000000..d9bc36af7392dbee12a43c436e2178235ac60b13 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/lib/tests/data/win64python2.npy differ diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test__datasource.py b/phivenv/Lib/site-packages/numpy/lib/tests/test__datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..df33b4042a168b383296d1209d62dc08b4882df6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test__datasource.py @@ -0,0 +1,350 @@ +import os +import pytest +from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +from shutil import rmtree + +import numpy.lib._datasource as datasource +from numpy.testing import assert_, assert_equal, assert_raises + +import urllib.request as urllib_request +from urllib.parse import urlparse +from urllib.error import URLError + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. 
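+ # (create a temporary file, then close and delete it so the returned + # path is guaranteed not to exist)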
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path+http_file + + +def invalid_httpurl(): + return http_fakepath+http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + assert_raises(OSError, self.ds.open, url) + try: + self.ds.open(url) + except OSError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + assert_raises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + assert_raises(OSError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for Gzip files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. 
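+ # Write the magic line through bz2, then check that DataSource.open + # decompresses it transparently on read.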
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + assert_equal(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = 
self.repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. + scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc: + def setup_method(self): + self.tmpdir = mkdtemp() + + def teardown_method(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test__iotools.py b/phivenv/Lib/site-packages/numpy/lib/tests/test__iotools.py new file mode 100644 index 0000000000000000000000000000000000000000..f1de9725faf47bbc5fd58c035ab4c887b8a805f2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test__iotools.py @@ -0,0 +1,353 @@ +import time +from datetime import date + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_allclose, assert_raises, + ) +from numpy.lib._iotools import ( + LineSplitter, NameValidator, StringConverter, + has_nested_fields, easy_dtype, flatten_dtype + ) + + +class TestLineSplitter: + "Tests the LineSplitter class." 
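+ # The delimiter may be None or '' (split on whitespace), a string such + # as ',' or '\t', an int (constant field width), or a sequence of ints + # (variable field widths); a trailing comment is stripped before the split.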
+ + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter()(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + test = LineSplitter('')(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + + def test_space_delimiter(self): + "Test space delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + # + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get encoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(3)(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(20)(strg) + assert_equal(test, ['1 3 4 5 6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(30)(strg) + assert_equal(test, ['1 3 4 5 6']) + + def test_variable_fixed_width(self): + strg = " 1 3 4 5 6# test" + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, ['1', '3', '4 5', '6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, ['1', '3 4', '5 6']) + +# ----------------------------------------------------------------------------- + + +class TestNameValidator: + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + # check exceptions + assert_raises(ValueError, NameValidator, case_sensitive='foobar') + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist, nbfields=1), ('a',)) + 
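+ # when more fields are requested than names given, the extra fields + # are named using defaultfmt: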
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter: + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy._core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (8). + for s in ['a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is str) + assert_equal(res, 'a') + assert_equal(converter._status, 8 + status_offset) + + def test_missing(self): + "Tests the use of missing values." 
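+ # the empty string always counts as missing; extra markers passed via + # missing_values extend that set, while other unparsable strings still + # raise ValueError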
+ converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + _original_mapper = StringConverter._mapper[:] + try: + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + finally: + StringConverter._mapper = _original_mapper + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions: + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + 
assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test__version.py b/phivenv/Lib/site-packages/numpy/lib/tests/test__version.py new file mode 100644 index 0000000000000000000000000000000000000000..679f764639799d06cd716985e26f23f07c03a670 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test__version.py @@ -0,0 +1,64 @@ +"""Tests for the NumpyVersion class. + +""" +from numpy.testing import assert_, assert_raises +from numpy.lib import NumpyVersion + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
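+ # (plain string comparison would sort '1.10.0' before '1.9.0')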
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_array_utils.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_array_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f3cb251a4f9400fe8f2449f6f4cc8acf0fc458c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_array_utils.py @@ -0,0 +1,33 @@ +import numpy as np + +from numpy.lib import array_utils +from numpy.testing import assert_equal + + +class TestByteBounds: + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = np.arange(12).reshape(3, 4) + low, high = array_utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = np.arange(12) + b = a[::2] + low, high = array_utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_arraypad.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_arraypad.py new file mode 100644 index 0000000000000000000000000000000000000000..82476ab8a4a737a54ec5bc26cdf352e05a6b0169 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1416 @@ +"""Tests for the array padding functions. 
+ +""" +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose, assert_equal +from numpy.lib._arraypad_impl import _as_pairs + + +_numeric_dtypes = ( + np._core.sctypes["uint"] + + np._core.sctypes["int"] + + np._core.sctypes["float"] + + np._core.sctypes["complex"] +) +_all_modes = { + 'constant': {'constant_values': 0}, + 'edge': {}, + 'linear_ramp': {'end_values': 0}, + 'maximum': {'stat_length': None}, + 'mean': {'stat_length': None}, + 'median': {'stat_length': None}, + 'minimum': {'stat_length': None}, + 'reflect': {'reflect_type': 'even'}, + 'symmetric': {'reflect_type': 'even'}, + 'wrap': {}, + 'empty': {} +} + + +class TestAsPairs: + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts: + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + 
@pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_clip_statistic_range(self, mode): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) + + +class TestStatistic: + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 
100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_minimum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a.T, 1, 'median').T + b = 
np.array( + [[5, 4, 5, 4, 5], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. + a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + "mean", + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) + @pytest.mark.parametrize( + "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] + ) + def test_check_negative_stat_length(self, mode, stat_length): + arr = np.arange(30).reshape((6, 5)) + match = 
"index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, 2, mode, stat_length=stat_length) + + def test_simple_stat_length(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning" + ) + @pytest.mark.parametrize("mode", ["mean", "median"]) + def test_zero_stat_length_valid(self, mode): + arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) + expected = np.array([np.nan, 1., 2., np.nan, np.nan]) + assert_equal(arr, expected) + + @pytest.mark.parametrize("mode", ["minimum", "maximum"]) + def test_zero_stat_length_invalid(self, mode): + match = "stat_length of 0 yields no value for padding" + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=(1, 0)) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=(1, 0)) + + +class TestConstant: + def test_check_constant(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], 
+ + [ 1, 0, 1, 2, 3, 4, 5, 1, 1], + [ 1, 6, 7, 8, 9, 10, 11, 1, 1], + [ 1, 12, 13, 14, 15, 16, 17, 1, 1], + [ 1, 18, 19, 20, 21, 22, 23, 1, 1], + [ 1, 24, 25, 26, 27, 28, 29, 1, 1], + + [ 1, 1, 1, 1, 1, 1, 1, 1, 1], + [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], + [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], + [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], + [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], + [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], + + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + 
constant_values=(obj_b, obj_c)) + + expected = np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + + +class TestLinearRamp: + def test_check_simple(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) + + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. 
+ """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + + +class TestReflect: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def 
test_check_01(self): + a = np.pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) + + +class TestEmptyArray: + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") + with pytest.raises(ValueError, match=match): + np.pad([], 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.ndarray(0), 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) + + +class TestSymmetric: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 
1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = np.pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. 
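+        Here each padded side (width 12) exceeds the base length (5), so the
+        wrap has to tile the original roughly two and a half times.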
+ """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + + def test_repeated_wrapping_multiple_origin(self): + """ + Assert that 'wrap' pads only with multiples of the original area if + the pad width is larger than the original array. + """ + a = np.arange(4).reshape(2, 2) + a = np.pad(a, [(1, 3), (3, 1)], mode='wrap') + b = np.array( + [[3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0]] + ) + assert_array_equal(a, b) + + +class TestEdge: + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. + a = np.array([1, 2, 3]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestEmpty: + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + +def test_legacy_vector_functionality(): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + + a = np.arange(6).reshape(2, 3) + a = np.pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +def test_unicode_mode(): + a = np.pad([1], 2, mode='constant') + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) +def test_object_input(mode): + # Regression test for issue gh-11395. 
+ a = np.full((4, 3), fill_value=None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), fill_value=None) + assert_array_equal(np.pad(a, pad_amt, mode=mode), b) + + +class TestPadWidth: + @pytest.mark.parametrize("pad_width", [ + (4, 5, 6, 7), + ((1,), (2,), (3,)), + ((1, 2), (3, 4), (5, 6)), + ((3, 4, 5), (0, 1, 2)), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "operands could not be broadcast together" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width_2(self, mode): + arr = np.arange(30).reshape((6, 5)) + match = ("input operand has more dimensions than allowed by the axis " + "remapping") + with pytest.raises(ValueError, match=match): + np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) + + @pytest.mark.parametrize( + "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_negative_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_bad_type(self, pad_width, dtype, mode): + arr = np.arange(30).reshape((6, 5)) + match = "`pad_width` must be of integral type." + if dtype is not None: + # avoid DeprecationWarning when not specifying dtype + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width, dtype=dtype), mode) + else: + with pytest.raises(TypeError, match=match): + np.pad(arr, pad_width, mode) + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width), mode) + + def test_pad_width_as_ndarray(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape(6, 5) + assert_array_equal(arr, np.pad(arr, pad_width, mode=mode)) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_kwargs(mode): + """Test behavior of pad's kwargs for the given mode.""" + allowed = _all_modes[mode] + not_allowed = {} + for kwargs in _all_modes.values(): + if kwargs != allowed: + not_allowed.update(kwargs) + # Test if allowed keyword arguments pass + np.pad([1, 2, 3], 1, mode, **allowed) + # Test if prohibited keyword arguments of other modes raise an error + for key, value in not_allowed.items(): + match = "unsupported keyword arguments for mode '{}'".format(mode) + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 1, mode, **{key: value}) + + +def test_constant_zero_default(): + arr = np.array([1, 1]) + assert_array_equal(np.pad(arr, 
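+        # mode defaults to 'constant' and constant_values defaults to 0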
2), [0, 0, 1, 1, 0, 0]) + + +@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False]) +def test_unsupported_mode(mode): + match= "mode '{}' is not supported".format(mode) + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 4, mode=mode) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_non_contiguous_array(mode): + arr = np.arange(24).reshape(4, 6)[::2, ::2] + result = np.pad(arr, (2, 3), mode) + assert result.shape == (7, 8) + assert_equal(result[2:-3, 2:-3], arr) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_memory_layout_persistence(mode): + """Test if C and F order is preserved for all pad modes.""" + x = np.ones((5, 10), order='C') + assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"] + x = np.ones((5, 10), order='F') + assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] + + +@pytest.mark.parametrize("dtype", _numeric_dtypes) +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_dtype_persistence(dtype, mode): + arr = np.zeros((3, 2, 1), dtype=dtype) + result = np.pad(arr, 1, mode=mode) + assert result.dtype == dtype diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_arraysetops.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_arraysetops.py new file mode 100644 index 0000000000000000000000000000000000000000..ab8b12cdda4bb4b0a6beee5ff2e7eb08a590d425 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_arraysetops.py @@ -0,0 +1,985 @@ +"""Test functions for 1D array set operations. + +""" +import numpy as np + +from numpy import ( + ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, isin + ) +from numpy.exceptions import AxisError +from numpy.testing import (assert_array_equal, assert_equal, + assert_raises, assert_raises_regex) +import pytest + + +class TestSetOps: + + def test_intersect1d(self): + # unique inputs + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([1, 2, 5]) + c = intersect1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + # non-unique inputs + a = np.array([5, 5, 7, 1, 2]) + b = np.array([2, 1, 4, 3, 3, 1, 5]) + + ed = np.array([1, 2, 5]) + c = intersect1d(a, b) + assert_array_equal(c, ed) + assert_array_equal([], intersect1d([], [])) + + def test_intersect1d_array_like(self): + # See gh-11772 + class Test: + def __array__(self, dtype=None, copy=None): + return np.arange(3) + + a = Test() + res = intersect1d(a, a) + assert_array_equal(res, a) + res = intersect1d([1, 2, 3], [1, 2, 3]) + assert_array_equal(res, [1, 2, 3]) + + def test_intersect1d_indices(self): + # unique inputs + a = np.array([1, 2, 3, 4]) + b = np.array([2, 1, 4, 6]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ee = np.array([1, 2, 4]) + assert_array_equal(c, ee) + assert_array_equal(a[i1], ee) + assert_array_equal(b[i2], ee) + + # non-unique inputs + a = np.array([1, 2, 2, 3, 4, 3, 2]) + b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ef = np.array([1, 2, 3, 4]) + assert_array_equal(c, ef) + assert_array_equal(a[i1], ef) + assert_array_equal(b[i2], ef) + + # non1d, unique inputs + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 6, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + # non1d, not assumed to be uniqueinputs + a = np.array([[2, 4, 5, 6, 
6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5, 6, 1, 7, 8], + ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])) + assert_array_equal([1, 9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8])) + assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7)) + assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6])) + + @pytest.mark.parametrize("ary, prepend, append, expected", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan, + 'to_end'), + # should fail because attempting + # to downcast to int type: + (np.array([1, 2, 3], dtype=np.int64), + np.array([5, 7, 2], dtype=np.float32), + None, + 'to_begin'), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary), + # `to_begin` is in the error message as the impl checks this first: + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan, + 'to_begin'), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = 'dtype of `{}` must be compatible'.format(expected) + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize( + "ary,prepend,append,expected", + [ + (np.array([1, 2, 3], dtype=np.int16), + 2**16, # will be cast to int16 under same kind rule. 
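+          # (2**16 wraps to 0 as int16 and 2**16 + 4 wraps to 4, hence the
+          # leading 0 and trailing 4 in the expected result)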
+ 2**16 + 4, + np.array([0, 1, 1, 4], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.float32), + np.array([5], dtype=np.float64), + None, + np.array([5, 1, 1], dtype=np.float32)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ] + ) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + assert actual.dtype == expected.dtype + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin(self, kind): + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + + def assert_isin_equal(a, b): + x = isin(a, b, kind=kind) + y = isin_slow(a, b) + assert_array_equal(x, y) + + # multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + # array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + # zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + # scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + # empty array-like: + if kind != "table": + # An empty list will become float64, + # which is invalid for kind="table" + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + # empty array with various types: + for dtype in [bool, np.int64, np.float64]: + if kind == "table" and dtype == np.float64: + continue + + if dtype in {np.int64, np.float64}: + ar = np.array([10, 20, 30], dtype=dtype) + elif dtype in {bool}: + ar = np.array([True, False, False]) + + empty_array = np.array([], dtype=dtype) + + assert_isin_equal(empty_array, ar) + assert_isin_equal(ar, empty_array) + assert_isin_equal(empty_array, empty_array) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin(self, kind): + # we use two different sizes for the b array here to test the + # two different paths in isin(). 
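+        # With kind=None, isin roughly switches between a broadcast
+        # element-comparison loop when `b` is small relative to `a` and a
+        # sort-based merge otherwise; `mult` pushes `b` across that threshold.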
+ for mult in (1, 10): + # One check without np.array to make sure lists are handled correct + a = [5, 7, 1, 2] + b = [2, 4, 3, 1, 5] * mult + ec = np.array([True, False, True, True]) + c = isin(a, b, assume_unique=True, kind=kind) + assert_array_equal(c, ec) + + a[0] = 8 + ec = np.array([False, False, True, True]) + c = isin(a, b, assume_unique=True, kind=kind) + assert_array_equal(c, ec) + + a[0], a[3] = 4, 8 + ec = np.array([True, False, True, False]) + c = isin(a, b, assume_unique=True, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + ec = [False, True, False, True, True, True, True, True, True, + False, True, False, False, False] + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + b = b + [5, 5, 4] * mult + ec = [True, True, True, True, True, True, True, True, True, True, + True, False, True, True] + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5] * mult) + ec = np.array([True, False, True, True]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 7, 1, 1, 2]) + b = np.array([2, 4, 3, 3, 1, 5] * mult) + ec = np.array([True, False, True, True, True]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 5]) + b = np.array([2, 2] * mult) + ec = np.array([False, False]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5]) + b = np.array([2]) + ec = np.array([False]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + if kind in {None, "sort"}: + assert_array_equal(isin([], [], kind=kind), []) + + def test_isin_char_array(self): + a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b']) + b = np.array(['a', 'c']) + + ec = np.array([True, False, True, False, False, True, False, False]) + c = isin(a, b) + + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_invert(self, kind): + "Test isin's invert parameter" + # We use two different sizes for the b array here to test the + # two different paths in isin(). 
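+        # invert=True takes its own internal path rather than simply negating
+        # the non-inverted result, hence the explicit equivalence check
+        # against np.invert below.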
+ for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + # float: + if kind in {None, "sort"}: + for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], + dtype=np.float32) + b = [2, 3, 4] * mult + b = np.array(b, dtype=np.float32) + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + def test_isin_hit_alternate_algorithm(self): + """Hit the standard isin code with integers""" + # Need extreme range to hit standard code + # This hits it without the use of kind='table' + a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) + b = np.array([2, 3, 4, 1e9], dtype=np.int64) + expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool) + assert_array_equal(expected, isin(a, b)) + assert_array_equal(np.invert(expected), isin(a, b, invert=True)) + + a = np.array([5, 7, 1, 2], dtype=np.int64) + b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64) + ec = np.array([True, False, True, True]) + c = isin(a, b, assume_unique=True) + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_boolean(self, kind): + """Test that isin works for boolean input""" + a = np.array([True, False]) + b = np.array([False, False, False]) + expected = np.array([False, True]) + assert_array_equal(expected, + isin(a, b, kind=kind)) + assert_array_equal(np.invert(expected), + isin(a, b, invert=True, kind=kind)) + + @pytest.mark.parametrize("kind", [None, "sort"]) + def test_isin_timedelta(self, kind): + """Test that isin works for timedelta input""" + rstate = np.random.RandomState(0) + a = rstate.randint(0, 100, size=10) + b = rstate.randint(0, 100, size=10) + truth = isin(a, b) + a_timedelta = a.astype("timedelta64[s]") + b_timedelta = b.astype("timedelta64[s]") + assert_array_equal(truth, isin(a_timedelta, b_timedelta, kind=kind)) + + def test_isin_table_timedelta_fails(self): + a = np.array([0, 1, 2], dtype="timedelta64[s]") + b = a + # Make sure it raises a value error: + with pytest.raises(ValueError): + isin(a, b, kind="table") + + @pytest.mark.parametrize( + "dtype1,dtype2", + [ + (np.int8, np.int16), + (np.int16, np.int8), + (np.uint8, np.uint16), + (np.uint16, np.uint8), + (np.uint8, np.int16), + (np.int16, np.uint8), + (np.uint64, np.int64), + ] + ) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_dtype(self, dtype1, dtype2, kind): + """Test that isin works as expected for mixed dtype input.""" + is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger) + ar1 = np.array([0, 0, 1, 1], dtype=dtype1) + + if is_dtype2_signed: + ar2 = np.array([-128, 0, 127], dtype=dtype2) + else: + ar2 = np.array([127, 0, 255], dtype=dtype2) + + expected = np.array([True, True, False, False]) + + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) + + if expect_failure: + with pytest.raises(RuntimeError, match="exceed the maximum"): + isin(ar1, ar2, kind=kind) + else: + assert_array_equal(isin(ar1, ar2, kind=kind), expected) + + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63+1], dtype=np.uint64), + np.array([-2**62, -2**62-1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + assert_array_equal(res, 
[False, True]) + # Also check that nothing weird happens for values can't possibly + # in range. + data = data.astype(np.int32) # clearly different values + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, False]) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_boolean(self, kind): + """Test that isin works as expected for bool/int input.""" + for dtype in np.typecodes["AllInteger"]: + a = np.array([True, False, False], dtype=bool) + b = np.array([0, 0, 0, 0], dtype=dtype) + expected = np.array([False, True, True], dtype=bool) + assert_array_equal(isin(a, b, kind=kind), expected) + + a, b = b, a + expected = np.array([True, True, True, True], dtype=bool) + assert_array_equal(isin(a, b, kind=kind), expected) + + def test_isin_first_array_is_object(self): + ar1 = [None] + ar2 = np.array([1]*10) + expected = np.array([False]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_second_array_is_object(self): + ar1 = 1 + ar2 = np.array([None]*10) + expected = np.array([False]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_both_arrays_are_object(self): + ar1 = [None] + ar2 = np.array([None]*10) + expected = np.array([True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_both_arrays_have_structured_dtype(self): + # Test arrays of a structured data type containing an integer field + # and a field of dtype `object` allowing for arbitrary Python objects + dt = np.dtype([('field1', int), ('field2', object)]) + ar1 = np.array([(1, None)], dtype=dt) + ar2 = np.array([(1, None)]*10, dtype=dt) + expected = np.array([True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_with_arrays_containing_tuples(self): + ar1 = np.array([(1,), 2], dtype=object) + ar2 = np.array([(1,), 2], dtype=object) + expected = np.array([True, True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + result = np.isin(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + # An integer is added at the end of the array to make sure + # that the array builder will create the array with tuples + # and after it's created the integer is removed. + # There's a bug in the array constructor that doesn't handle + # tuples properly and adding the integer fixes that. + ar1 = np.array([(1,), (2, 1), 1], dtype=object) + ar1 = ar1[:-1] + ar2 = np.array([(1,), (2, 1), 1], dtype=object) + ar2 = ar2[:-1] + expected = np.array([True, True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + result = np.isin(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + ar1 = np.array([(1,), (2, 3), 1], dtype=object) + ar1 = ar1[:-1] + ar2 = np.array([(1,), 2], dtype=object) + expected = np.array([True, False]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + result = np.isin(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + def test_isin_errors(self): + """Test that isin raises expected errors.""" + + # Error 1: `kind` is not one of 'sort' 'table' or None. + ar1 = np.array([1, 2, 3, 4, 5]) + ar2 = np.array([2, 4, 6, 8, 10]) + assert_raises(ValueError, isin, ar1, ar2, kind='quicksort') + + # Error 2: `kind="table"` does not work for non-integral arrays. 
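+        # ("table" builds a dense lookup table spanning min(ar2)..max(ar2),
+        # which is only meaningful for integer and boolean values)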
+ obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object) + obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object) + assert_raises(ValueError, isin, obj_ar1, obj_ar2, kind='table') + + for dtype in [np.int32, np.int64]: + ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype) + # The range of this array will overflow: + overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype) + + # Error 3: `kind="table"` will trigger a runtime error + # if there is an integer overflow expected when computing the + # range of ar2 + assert_raises( + RuntimeError, + isin, ar1, overflow_ar2, kind='table' + ) + + # Non-error: `kind=None` will *not* trigger a runtime error + # if there is an integer overflow, it will switch to + # the `sort` algorithm. + result = np.isin(ar1, overflow_ar2, kind=None) + assert_array_equal(result, [True] + [False] * 4) + result = np.isin(ar1, overflow_ar2, kind='sort') + assert_array_equal(result, [True] + [False] * 4) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique: + + def test_unique_1d(self): + + def check_all(a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + + msg = 
base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + + a = [5, 7, 1, 2, 1, 5, 7]*10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3]*10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [[0, 1], [0, 1], [1, 2]] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.char.chararray(5) + a[...] = '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, + return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + # test for ticket 2111 - float + a = [2.0, np.nan, 1.0, np.nan] + ua = [1.0, 2.0, np.nan] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - complex + a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)] + ua_idx = [2, 0, 3] + ua_inv = [1, 2, 0, 2, 2] + ua_cnt = [1, 1, 3] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - datetime64 + nat = np.datetime64('nat') + a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] + ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - timedelta + nat = 
np.timedelta64('nat') + a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] + ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(AxisError, unique, np.arange(10), axis=2) + assert_raises(AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + @pytest.mark.parametrize("axis", [0, -1]) + def test_unique_1d_with_axis(self, axis): + x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) + uniq = unique(x, axis=axis) + assert_array_equal(uniq, [1, 2, 3, 4]) + + @pytest.mark.parametrize("axis", [None, 0, -1]) + def test_unique_inverse_with_axis(self, axis): + x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) + uniq, inv = unique(x, return_inverse=True, axis=axis) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) + + def test_unique_axis_zeros(self): + # issue 15559 + single_zero = np.empty(shape=(2, 0), dtype=np.int8) + uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, + return_inverse=True, return_counts=True) + + # there's 1 element of shape (0,) along axis 0 + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(1, 0))) + assert_array_equal(idx, np.array([0])) + assert_array_equal(inv, np.array([0, 0])) + assert_array_equal(cnt, np.array([2])) + + # there's 0 elements of shape (2,) along axis 1 + uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, + return_inverse=True, return_counts=True) + + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(2, 0))) + assert_array_equal(idx, np.array([])) + assert_array_equal(inv, np.array([])) + 
assert_array_equal(cnt, np.array([])) + + # test a "complicated" shape + shape = (0, 2, 0, 3, 0, 4, 0) + multiple_zeros = np.empty(shape=shape) + for axis in range(len(shape)): + expected_shape = list(shape) + if shape[axis] == 0: + expected_shape[axis] = 0 + else: + expected_shape[axis] = 1 + + assert_array_equal(unique(multiple_zeros, axis=axis), + np.empty(shape=expected_shape)) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], + dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. + fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1], [0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(np.take(uniq, inv, axis=0), data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(np.take(uniq, inv, axis=1), data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) + + def test_unique_nanequals(self): + # issue 20326 + a = np.array([1, 1, np.nan, np.nan, np.nan]) + unq = np.unique(a) + not_unq = np.unique(a, equal_nan=False) + assert_array_equal(unq, np.array([1, np.nan])) + assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) + + def test_unique_array_api_functions(self): + arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + + for res_unique_array_api, res_unique in [ + ( + np.unique_values(arr), + np.unique(arr, equal_nan=False) + ), + ( + np.unique_counts(arr), + np.unique(arr, return_counts=True, equal_nan=False) + ), + ( + np.unique_inverse(arr), + np.unique(arr, return_inverse=True, equal_nan=False) + ), + ( + np.unique_all(arr), + np.unique( 
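+                # the array API treats NaNs as distinct values, hence
+                # equal_nan=False for every unique_* comparison here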
+ arr, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False + ) + ) + ]: + assert len(res_unique_array_api) == len(res_unique) + for actual, expected in zip(res_unique_array_api, res_unique): + assert_array_equal(actual, expected) + + def test_unique_inverse_shape(self): + # Regression test for https://github.com/numpy/numpy/issues/25552 + arr = np.array([[1, 2, 3], [2, 3, 1]]) + expected_values, expected_inverse = np.unique(arr, return_inverse=True) + expected_inverse = expected_inverse.reshape(arr.shape) + for func in np.unique_inverse, np.unique_all: + result = func(arr) + assert_array_equal(expected_values, result.values) + assert_array_equal(expected_inverse, result.inverse_indices) + assert_array_equal(arr, result.values[result.inverse_indices]) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_arrayterator.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ba83ae827d20faa70981d9f034e9ed6087d576 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,46 @@ +from operator import mul +from functools import reduce + +import numpy as np +from numpy.random import randint +from numpy.lib import Arrayterator +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5)+1 + shape = tuple(randint(10)+1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els) + a.shape = shape + + buf_size = randint(2*els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim)+1 for dim in shape] + step = [randint(dim)+1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that the arrayterator is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_format.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_format.py new file mode 100644 index 0000000000000000000000000000000000000000..cac3ad66f25936466279d006d75dfbe5394fbc65 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_format.py @@ -0,0 +1,1027 @@ +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... 
('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. + + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... 
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': 
(3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import sys +import 
+import os
+import warnings
+import pytest
+from io import BytesIO
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_array_equal, assert_raises, assert_raises_regex,
+ assert_warns, IS_PYPY, IS_WASM
+ )
+from numpy.testing._private.utils import requires_memory
+from numpy.lib import format
+
+
+# Generate some basic arrays to test with.
+scalars = [
+ np.uint8,
+ np.int8,
+ np.uint16,
+ np.int16,
+ np.uint32,
+ np.int32,
+ np.uint64,
+ np.int64,
+ np.float32,
+ np.float64,
+ np.complex64,
+ np.complex128,
+ object,
+]
+basic_arrays = []
+for scalar in scalars:
+ for endian in '<>':
+ dtype = np.dtype(scalar).newbyteorder(endian)
+ basic = np.arange(1500).astype(dtype)
+ basic_arrays.extend([
+ # Empty
+ np.array([], dtype=dtype),
+ # Rank-0
+ np.array(10, dtype=dtype),
+ # 1-D
+ basic,
+ # 2-D C-contiguous
+ basic.reshape((30, 50)),
+ # 2-D F-contiguous
+ basic.reshape((30, 50)).T,
+ # 2-D non-contiguous
+ basic.reshape((30, 50))[::-1, ::2],
+ ])
+
+# More complicated record arrays.
+# This is the structure of the table used for plain objects:
+#
+# +-+-+-+
+# |x|y|z|
+# +-+-+-+
+
+# Structure of a plain array description:
+Pdescr = [
+ ('x', 'i4', (2,)),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+# A plain list of tuples with values for testing:
+PbufferT = [
+ # x y z
+ ([3, 2], [[6., 4.], [6., 4.]], 8),
+ ([4, 3], [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+# This is the structure of the table used for nested objects (DON'T PANIC!):
+#
+# +-+---------------------------------+-----+----------+-+-+
+# |x|Info |color|info |y|z|
+# | +-----+--+----------------+----+--+ +----+-----+ | |
+# | |value|y2|Info2 |name|z2| |Name|Value| | |
+# | | | +----+-----+--+--+ | | | | | | |
+# | | | |name|value|y3|z3| | | | | | | |
+# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
+#
+
+# The corresponding nested array description:
+Ndescr = [
+ ('x', 'i4', (2,)),
+ ('Info', [
+ ('value', 'c16'),
+ ('y2', 'f8'),
+ ('Info2', [
+ ('name', 'S2'),
+ ('value', 'c16', (2,)),
+ ('y3', 'f8', (2,)),
+ ('z3', 'u4', (2,))]),
+ ('name', 'S2'),
+ ('z2', 'b1')]),
+ ('color', 'S2'),
+ ('info', [
+ ('Name', 'U8'),
+ ('Value', 'c16')]),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+NbufferT = [
+ # x Info color info y z
+ # value y2 Info2 name z2 Name Value
+ # name value y3 z3
+ ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
+ 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
+ ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
+ 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
+ ]
+
+record_arrays = [
+ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
+ np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
+ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
+ np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
+ ]
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988")
+@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup")
+def test_python2_python3_interoperability():
+ fname = 'win64python2.npy'
+ path = os.path.join(os.path.dirname(__file__), 'data', fname)
+ with pytest.warns(UserWarning, match="Reading.*this warning\\."):
+ data = np.load(path)
+ assert_array_equal(data, np.ones(2))
+
+
+def test_pickle_python2_python3():
+ # Test that loading object arrays saved on Python 2 works both on
+ # Python 2 and Python 3 and vice versa
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ expected = np.array([None, range, '\u512a\u826f',
+ b'\xe4\xb8\x8d\xe8\x89\xaf'],
+ dtype=object)
+
+ for fname in 
['py2-np0-objarr.npy', 'py2-objarr.npy', 'py2-objarr.npz', + 'py3-objarr.npy', 'py3-objarr.npz']: + path = os.path.join(data_dir, fname) + + for encoding in ['bytes', 'latin1']: + data_f = np.load(path, allow_pickle=True, encoding=encoding) + if fname.endswith('.npz'): + data = data_f['x'] + data_f.close() + else: + data = data_f + + if encoding == 'latin1' and fname.startswith('py2'): + assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) + else: + assert_(isinstance(data[3], bytes)) + assert_array_equal(data, expected) + + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path, allow_pickle=True) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, allow_pickle=True, fix_imports=False, + encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path, + allow_pickle=True) + assert_raises(ImportError, np.load, path, + allow_pickle=True, fix_imports=False, + encoding='latin1') + + +def test_pickle_disallow(tmpdir): + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + path = os.path.join(data_dir, 'py2-objarr.npy') + assert_raises(ValueError, np.load, path, + allow_pickle=False, encoding='latin1') + + path = os.path.join(data_dir, 'py2-objarr.npz') + with np.load(path, allow_pickle=False, encoding='latin1') as f: + assert_raises(ValueError, f.__getitem__, 'x') + + path = os.path.join(tmpdir, 'pickle-disabled.npy') + assert_raises(ValueError, np.save, path, np.array([None], dtype=object), + allow_pickle=False) + +@pytest.mark.parametrize('dt', [ + np.dtype(np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + (3,)), + np.dtype([('x', np.dtype({'names':['a','b'], + 'formats':['i1','i1'], + 'offsets':[0,4], + 'itemsize':8, + }, + (3,)), + (4,), + )]), + np.dtype([('x', + (' 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + + a = np.array([1, 4, 3, 2, 5, 8, 7]) + place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + place(a, np.zeros(7), []) + assert_array_equal(a, np.arange(1, 8)) + + place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) + assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) + assert_raises_regex(ValueError, "Cannot insert from an empty array", + lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) + + # See Issue #6974 + a = np.array(['12', '34']) + place(a, [0, 1], '9') + assert_array_equal(a, ['12', '9']) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +# _foo1 and _foo2 are used in some tests in TestVectorize. 
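+# (Added note, illustrative only and not part of the upstream suite: the
+# TestVectorize cases below exercise the np.vectorize API, which wraps a
+# scalar Python function so it broadcasts over array arguments; `otypes`
+# pins the output dtype, `excluded` keeps chosen arguments scalar, and
+# `signature` makes the wrapper act as a generalized ufunc over core
+# dimensions. A minimal sketch:
+#
+# >>> f = np.vectorize(lambda x, y=1.0: y * x, otypes=[float])
+# >>> f([1, 2, 3])
+# array([1., 2., 3.])
+# >>> g = np.vectorize(lambda v: v.mean(), signature='(n)->()')
+# >>> g([[1, 3], [2, 4]])
+# array([2., 3.])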
+ +def _foo1(x, y=1.0): + return y*math.floor(x) + + +def _foo2(x, y=1.0, z=0.0): + return y*math.floor(x) + z + + +class TestVectorize: + + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order1(self): + # gh-1620: The second call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0), 1.0) + r2 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order2(self): + # gh-1620: The second call of f would crash with + # `ValueError: non-broadcastable output operand with shape () + # doesn't match the broadcast shape (3,)`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), 1.0) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order3(self): + # gh-1620: The third call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), y=1.0) + r3 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + assert_array_equal(r1, r3) + + def test_keywords_with_otypes_several_kwd_args1(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(10.4, z=100) + r2 = f(10.4, y=-1) + r3 = f(10.4) + assert_equal(r1, _foo2(10.4, z=100)) + assert_equal(r2, _foo2(10.4, y=-1)) + assert_equal(r3, _foo2(10.4)) + + def test_keywords_with_otypes_several_kwd_args2(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. 
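+ # (Added note: the ufunc cached by vectorize assumed a fixed
+ # argument layout, so mixing keyword and positional calls used to
+ # fail; see gh-1620.)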
+ r1 = f(z=100, x=10.4, y=-1) + r2 = f(1, 2, 3) + assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) + assert_equal(r2, _foo2(1, 2, 3)) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError() + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. 
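+ # (Added note: without cache=True the first element is evaluated
+ # twice, once to determine the output dtype and once in the real
+ # loop, so the call count would be len(x) + 1.)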
+ _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + # Tests to check if whitespaces are ignored + assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature( + '( ), ( a, b,c ) ,( d) -> (d , e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = 
vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + + m = np.array([[1., 0., 0.], + [0., 0., 1.], + [0., 1., 0.]]).view(subclass) + v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) + # generalized (gufunc) + matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') + r = matvec(m, v) + assert_equal(type(r), subclass) + assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) + + # element-wise (ufunc) + mult = np.vectorize(lambda x, y: x*y) + r = mult(m, v) + assert_equal(type(r), subclass) + assert_equal(r, m * v) + + def test_name(self): + #See gh-23021 + @np.vectorize + def f2(a, b): + return a + b + + assert f2.__name__ == 'f2' + + def test_decorator(self): + @vectorize + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_docstring(self): + @vectorize + def f(x): + """Docstring""" + return x + + if sys.flags.optimize < 2: + assert f.__doc__ == "Docstring" + + def test_partial(self): + def foo(x, y): + return x + y + + bar = partial(foo, 3) + vbar = np.vectorize(bar) + assert vbar(1) == 4 + + def test_signature_otypes_decorator(self): + @vectorize(signature='(n)->(n)', otypes=['float64']) + def f(x): + return x + + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + assert f.__name__ == 'f' + + def test_bad_input(self): + with assert_raises(TypeError): + A = np.vectorize(pyfunc = 3) + + def test_no_keywords(self): + with assert_raises(TypeError): + @np.vectorize("string") + def foo(): + return "bar" + + def 
test_positional_regression_9477(self): + # This supplies the first keyword argument as a positional, + # to ensure that they are still properly forwarded after the + # enhancement for #9477 + f = vectorize((lambda x: x), ['float64']) + r = f([2]) + assert_equal(r.dtype, np.dtype('float64')) + + def test_datetime_conversion(self): + otype = "datetime64[ns]" + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + dtype='datetime64[ns]') + assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", + otypes=[otype])(arr), arr) + + +class TestLeaks: + class A: + iters = 20 + + def bound(self, *args): + return 0 + + @staticmethod + def unbound(*args): + return 0 + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize('name, incr', [ + ('bound', A.iters), + ('unbound', 0), + ]) + def test_frompyfunc_leaks(self, name, incr): + # exposed in gh-11867 as np.vectorized, but the problem stems from + # frompyfunc. + # class.attribute = np.frompyfunc() creates a + # reference cycle if is a bound class method. It requires a + # gc collection cycle to break the cycle (on CPython 3) + import gc + A_func = getattr(self.A, name) + gc.disable() + try: + refcount = sys.getrefcount(A_func) + for i in range(self.A.iters): + a = self.A() + a.f = np.frompyfunc(getattr(a, name), 1, 1) + out = a.f(np.arange(10)) + a = None + # A.func is part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) + for i in range(5): + gc.collect() + assert_equal(sys.getrefcount(A_func), refcount) + finally: + gc.enable() + + +class TestDigitize: + + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + right_answer = [0, 1, 1, 2, 2, 3, 0] + assert_array_equal(digitize(x, bins, True), right_answer) + + def test_right_open(self): + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_random(self): + x = rand(10) + bins = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bins, True) != 10)) + + def test_monotonic(self): + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) + assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) + bins = [1, 1, 0] + assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) + assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) + bins = [1, 1, 1, 1] + assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) + assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) + bins = [0, 0, 1, 0] + assert_raises(ValueError, digitize, x, bins) + bins = [1, 1, 0, 1] + assert_raises(ValueError, digitize, x, bins) + + def test_casting_error(self): + x = [1, 2, 3 + 1.j] + bins = [1, 2, 3] + assert_raises(TypeError, digitize, x, bins) + x, bins = bins, x + assert_raises(TypeError, digitize, x, bins) + + def 
test_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + assert_(not isinstance(digitize(b, a, False), A)) + assert_(not isinstance(digitize(b, a, True), A)) + + def test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np._core.multiarray._monoticity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + + +class TestUnwrap: + + def test_simple(self): + # check that unwrap removes jumps greater that 2*pi + assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) + + def test_period(self): + # check that unwrap removes jumps greater that 255 + assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255)) + # check simple case + simple_seq = np.array([0, 75, 150, 225, 300]) + wrap_seq = np.mod(simple_seq, 255) + assert_array_equal(unwrap(wrap_seq, period=255), simple_seq) + # check custom discont value + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + no_discont = unwrap(wrap_uneven, period=250) + assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) + sm_discont = unwrap(wrap_uneven, period=250, discont=140) + assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) + assert sm_discont.dtype == wrap_uneven.dtype + + +@pytest.mark.parametrize( + "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"] +) +@pytest.mark.parametrize("M", [0, 1, 10]) +class TestFilterwindows: + + def test_hanning(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hanning(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hamming(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = bartlett(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = 
blackman(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + def test_kaiser(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = kaiser(scalar, 0) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 10, 15) + + +class TestTrapezoid: + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y[None, :, None], axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z[None, None, :], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(y, xm), r) + + +class TestSinc: + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + +class TestUnique: + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite: + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.asarray_chkfinite(a) + assert_raises(ValueError, np.asarray_chkfinite, b) + assert_raises(ValueError, np.asarray_chkfinite, c) + + def test_dtype_order(self): + 
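+ # (Added note: asarray_chkfinite forwards dtype/order to np.asarray
+ # and then raises ValueError if the result contains NaN or Inf.)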
# Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef: + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_ddof(self): + # ddof raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + sup.filter(DeprecationWarning) + # ddof has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) + + def test_bias(self): + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) + sup.filter(DeprecationWarning) + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, bias=1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_corrcoef_dtype(self, test_type): + cast_A = self.A.astype(test_type) + res = corrcoef(cast_A, dtype=test_type) + assert test_type == res.dtype + + +class TestCov: + x1 = np.array([[0, 2], [1, 1], [2, 
0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, dtype=np.int_) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.int_) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, 
fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_cov_dtype(self, test_type): + cast_x1 = self.x1.astype(test_type) + res = cov(cast_x1, dtype=test_type) + assert test_type == res.dtype + + +class Test_I0: + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + # need at least one test above 8, as the implementation is piecewise + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + assert_almost_equal(i0(A), expected) + assert_almost_equal(i0(-A), expected) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + # Regression test for gh-11205 + i0_0 = np.i0([0.]) + assert_equal(i0_0.shape, (1,)) + assert_array_equal(np.i0([0.]), np.array([1.])) + + def test_non_array(self): + a = np.arange(4) + + class array_like: + __array_interface__ = a.__array_interface__ + + def __array_wrap__(self, arr, context, return_scalar): + return self + + # E.g. pandas series survive ufunc calls through array-wrap: + assert isinstance(np.abs(array_like()), array_like) + exp = np.i0(a) + res = np.i0(array_like()) + + assert_array_equal(exp, res) + + def test_complex(self): + a = np.array([0, 1 + 2j]) + with pytest.raises(TypeError, match="i0 not supported for complex values"): + res = i0(a) + + +class TestKaiser: + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMeshgrid: + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def 
test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], [4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. + # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x,y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x,y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x,y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + def test_nd_shape(self): + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + + +class TestPiecewise: + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, 
False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. + assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x<2., x>=4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + + +class TestBincount: + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j in range(10): + np.bincount([1, 2, 3]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + 
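+ # (Added note: bincount creates intp (and, with weights, double)
+ # arrays internally; unchanged refcounts on those dtypes show that
+ # no dtype reference is leaked, see gh-6805.)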
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + for j in range(10): + np.bincount([1, 2, 3], [4, 5, 6]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + @pytest.mark.parametrize("vals", [[[2, 2]], 2]) + def test_error_not_1d(self, vals): + # Test that values has to be 1-D (both as array and nested list) + vals_arr = np.asarray(vals) + with assert_raises(ValueError): + np.bincount(vals_arr) + with assert_raises(ValueError): + np.bincount(vals) + + +class TestInterp: + + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. + for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior_exact_x(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + @pytest.fixture(params=[ + lambda x: np.float64(x), + lambda x: _make_complex(x, 0), + lambda x: _make_complex(0, x), + lambda x: _make_complex(x, np.multiply(x, -2)) + ], ids=[ + 'real', + 'complex-real', + 'complex-imag', + 'complex-both' + ]) + def sc(self, request): + """ scale function used by the below tests """ + return request.param + + def test_non_finite_any_nan(self, sc): + """ test that nans are propagated """ + assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), 
sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) + + def test_non_finite_inf(self, sc): + """ Test that interp between opposite infs gives nan """ + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + + # unless the y values are equal + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) + + def test_non_finite_half_inf_xf(self, sc): + """ Test that interp where both axes have a bound at inf gives nan """ + assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + + def test_non_finite_half_inf_x(self, sc): + """ Test interp where the x axis has a bound at inf """ + assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) + assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) + + def test_non_finite_half_inf_f(self, sc): + """ Test interp where the f axis has a bound at inf """ + assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j + x0 = 0.3 + y0 = x0 + (1+x0)*1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2+1j, np.inf, 4] + y = [1, 2+1j, np.inf+0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5+1.0j, 10+2j, 3+3j, 4+4j] + y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, + 3.5+3.5j, 3.75+3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + 
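+ # Worked arithmetic (editorial) for the periodic case below: with
+ # xp = [0, 2, 4], fp = [1, -1, 1] and period=4, the query 4.5 wraps to
+ # 4.5 % 4 = 0.5, and linear interpolation between (0, 1) and (2, -1)
+ # gives 1 + (0.5 / 2) * (-1 - 1) = 0.5.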
assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +class TestPercentile: + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) + assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, method='nearest'), np.nan) + assert_equal(np.percentile(x, 0, method='inverted_cdf'), np.nan) + assert_equal( + np.percentile(x, 0, method='inverted_cdf', + weights=np.ones_like(x)), + np.nan, + ) + + def test_fraction(self): + x = [Fraction(i, 2) for i in range(8)] + + p = np.percentile(x, Fraction(0)) + assert_equal(p, Fraction(0)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(100)) + assert_equal(p, Fraction(7, 2)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(50)) + assert_equal(p, Fraction(7, 4)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_complex(self): + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + def test_linear_nan_1D(self, dtype): + # METHOD 1 of H&F + arr = np.asarray([15.0, np.nan, 35.0, 40.0, 50.0], dtype=dtype) + res = np.percentile( + arr, + 40.0, + method="linear") + np.testing.assert_equal(res, np.nan) + np.testing.assert_equal(res.dtype, arr.dtype) + + H_F_TYPE_CODES = [(int_type, np.float64) + for int_type in np.typecodes["AllInteger"] + ] + [(np.float16, np.float16), + (np.float32, np.float32), + (np.float64, np.float64), + (np.longdouble, np.longdouble), + (np.dtype("O"), np.float64)] + + @pytest.mark.parametrize(["function", "quantile"], + [(np.quantile, 0.4), + (np.percentile, 40.0)]) + @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES) + @pytest.mark.parametrize(["method", "weighted", "expected"], + [("inverted_cdf", False, 20), + ("inverted_cdf", True, 20), + ("averaged_inverted_cdf", False, 27.5), + ("closest_observation", False, 20), + ("interpolated_inverted_cdf", False, 20), + ("hazen", False, 27.5), + ("weibull", False, 26), + ("linear", False, 29), + ("median_unbiased", False, 27), + 
("normal_unbiased", False, 27.125), + ]) + def test_linear_interpolation(self, + function, + quantile, + method, + weighted, + expected, + input_dtype, + expected_dtype): + expected_dtype = np.dtype(expected_dtype) + if np._get_promotion_state() == "legacy": + expected_dtype = np.promote_types(expected_dtype, np.float64) + + arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) + weights = np.ones_like(arr) if weighted else None + if input_dtype is np.longdouble: + if function is np.quantile: + # 0.4 is not exactly representable and it matters + # for "averaged_inverted_cdf", so we need to cheat. + quantile = input_dtype("0.4") + # We want to use nulp, but that does not work for longdouble + test_function = np.testing.assert_almost_equal + else: + test_function = np.testing.assert_array_almost_equal_nulp + + actual = function(arr, quantile, method=method, weights=weights) + + test_function(actual, expected_dtype.type(expected)) + + if method in ["inverted_cdf", "closest_observation"]: + if input_dtype == "O": + np.testing.assert_equal(np.asarray(actual).dtype, np.float64) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(input_dtype)) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(expected_dtype)) + + TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O" + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_lower_higher(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='lower'), 4) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='higher'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_midpoint(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='midpoint'), 4.5) + assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50, + method='midpoint'), 5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 51, + method='midpoint'), 5.5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 50, + method='midpoint'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_nearest(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='nearest'), 5) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 49, + method='nearest'), 4) + + def test_linear_interpolation_extrapolation(self): + arr = np.random.rand(5) + + actual = np.percentile(arr, 100) + np.testing.assert_equal(actual, arr.max()) + + actual = np.percentile(arr, 0) + np.testing.assert_equal(actual, arr.min()) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + 
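+ # Editorial note: the rule exercised here is that for array-valued q the
+ # result shape is np.shape(q) + the input shape with the reduced axes
+ # removed, e.g. q=(25, 50) on shape (3, 4, 5, 6) with axis=1 gives
+ # (2,) + (3, 5, 6) == (2, 3, 5, 6).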
assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + method="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + method="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + method="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + method="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + method="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + method="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, (25, 50, 75), axis=1, + method="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, method='lower'), 5.) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, method='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, method='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, method='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + method='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + @pytest.mark.parametrize( + "percentile, with_weights", + [ + (np.percentile, False), + (partial(np.percentile, method="inverted_cdf"), True), + ] + ) + def test_percentile_out(self, percentile, with_weights): + out_dtype = int if with_weights else float + x = np.array([1, 2, 3]) + y = np.zeros((3,), dtype=out_dtype) + p = (1, 2, 3) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights), y) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + y = np.zeros((3, 3), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, axis=0, out=y, weights=weights) 
+ assert r is y + assert_equal(percentile(x, p, weights=weights, axis=0), y) + + y = np.zeros((3, 2), dtype=out_dtype) + percentile(x, p, axis=1, out=y, weights=weights) + assert_equal(percentile(x, p, weights=weights, axis=1), y) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + if with_weights: + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + else: + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + assert_equal( + percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 + ) + assert_equal(out, r0) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + method='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + method='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = 
x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:,:,:, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:,:, 1,:].flatten(), [10, 90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:,:, 2,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2,:,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2,:,:, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2,:, 2,:].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(AxisError, np.percentile, d, axis=4, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.percentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, method='nearest', out=o), o) + + 
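+ # Minimal out= sketch (editorial, mirroring the asserts above): the
+ # preallocated buffer must already have the result shape; it is filled
+ # in place and also returned, e.g.
+ #   buf = np.empty(4)
+ #   res = np.percentile(np.ones((3, 4)), 50, axis=0, out=buf)
+ #   assert res is buf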
@pytest.mark.parametrize("method, weighted", [ + ("linear", False), + ("nearest", False), + ("inverted_cdf", False), + ("inverted_cdf", True), + ]) + def test_out_nan(self, method, weighted): + if weighted: + kwargs = {"weights": np.ones((3, 4)), "method": method} + else: + kwargs = {"method": method} + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o, **kwargs), o) + + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o, **kwargs), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 1, out=o, **kwargs), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + + # axis1 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 1), b) + # axis1 not zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) + b[:, 1, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + + # axis02 zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.percentile(a, 0.3, (0, 2)), b) + # axis02 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2)) + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + # axis02 not zerod with method='nearest' + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2), method='nearest') + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), method='nearest'), b) + + def test_nan_q(self): + # GH18830 + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], np.nan) + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], [np.nan]) + q = np.linspace(1.0, 99.0, 16) + q[0] = np.nan + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], q) + + @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_basic(self, dtype, pos): + # TODO: Note that times have dubious rounding as of fixing NaTs! 
+ # NaT and NaN should behave the same, do basic tests for NaT: + a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.percentile(a, 30) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.percentile(a, 30, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + + +methods_supporting_weights = ["inverted_cdf"] + + +class TestQuantile: + # most of this is already tested by TestPercentile + + def V(self, x, y, alpha): + # Identification function used in several tests. + return (x >= y) - alpha + + def test_max_ulp(self): + x = [0.0, 0.2, 0.4] + a = np.quantile(x, 0.45) + # The default linear method would result in 0 + 0.2 * (0.45/2) = 0.18. + # 0.18 is not exactly representable and the formula leads to a 1 ULP + # different result. Ensure it is this exact within 1 ULP, see gh-20331. + np.testing.assert_array_max_ulp(a, 0.18, maxulp=1) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_correct_quantile_value(self): + a = np.array([True]) + tf_quant = np.quantile(True, False) + assert_equal(tf_quant, a[0]) + assert_equal(type(tf_quant), a.dtype) + a = np.array([False, True, True]) + quant_res = np.quantile(a, a) + assert_array_equal(quant_res, a) + assert_equal(quant_res.dtype, a.dtype) + + def test_fraction(self): + # fractional input, integral quantile + x = [Fraction(i, 2) for i in range(8)] + q = np.quantile(x, 0) + assert_equal(q, 0) + assert_equal(type(q), Fraction) + + q = np.quantile(x, 1) + assert_equal(q, Fraction(7, 2)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, .5) + assert_equal(q, 1.75) + assert_equal(type(q), np.float64) + + q = np.quantile(x, Fraction(1, 2)) + assert_equal(q, Fraction(7, 4)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + + # repeat with integral input but fractional quantile + x = np.arange(8) + assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) + + def test_complex(self): + #See gh-22652 + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_quantile_preserve_int_type(self, dtype): + res = np.quantile(np.array([1, 2], dtype=dtype), 
[0.5], + method="nearest") + assert res.dtype == dtype + + @pytest.mark.parametrize("method", quantile_methods) + def test_q_zero_one(self, method): + # gh-24710 + arr = [10, 11, 12] + quantile = np.quantile(arr, q=[0, 1], method=method) + assert_equal(quantile, np.array([10, 12])) + + @pytest.mark.parametrize("method", quantile_methods) + def test_quantile_monotonic(self, method): + # GH 14685 + # test that the return value of quantile is monotonic if p0 is ordered + # Also tests that the boundary values are not mishandled. + p0 = np.linspace(0, 1, 101) + quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, + 8, 8, 7]) * 0.1, p0, method=method) + assert_equal(np.sort(quantile), quantile) + + # Also test one where the number of data points is clearly divisible: + quantile = np.quantile([0., 1., 2., 3.], p0, method=method) + assert_equal(np.sort(quantile), quantile) + + @hypothesis.given( + arr=arrays(dtype=np.float64, + shape=st.integers(min_value=3, max_value=1000), + elements=st.floats(allow_infinity=False, allow_nan=False, + min_value=-1e300, max_value=1e300))) + def test_quantile_monotonic_hypo(self, arr): + p0 = np.arange(0, 1, 0.01) + quantile = np.quantile(arr, p0) + assert_equal(np.sort(quantile), quantile) + + def test_quantile_scalar_nan(self): + a = np.array([[10., 7., 4.], [3., 2., 1.]]) + a[0][1] = np.nan + actual = np.quantile(a, 0.5) + assert np.isscalar(actual) + assert_equal(np.quantile(a, 0.5), np.nan) + + @pytest.mark.parametrize("weights", [False, True]) + @pytest.mark.parametrize("method", quantile_methods) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_identification_equation(self, weights, method, alpha): + # Test that the identification equation holds for the empirical + # CDF: + # E[V(x, Y)] = 0 <=> x is quantile + # with Y the random variable for which we have observed values and + # V(x, y) the canonical identification function for the quantile (at + # level alpha), see + # https://doi.org/10.48550/arXiv.0912.0902 + if weights and method not in methods_supporting_weights: + pytest.skip("Weights not supported by method.") + rng = np.random.default_rng(4321) + # We choose n and alpha such that we cover 3 cases: + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n) if weights else None + x = np.quantile(y, alpha, method=method, weights=w) + + if method in ("higher",): + # These methods do not fulfill the identification equation. + assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n + elif int(n * alpha) == n * alpha and not weights: + # We can expect exact results, up to machine precision. + assert_allclose( + np.average(self.V(x, y, alpha), weights=w), 0, atol=1e-14, + ) + else: + # V = (x >= y) - alpha cannot sum to zero exactly but within + # "sample precision". + assert_allclose(np.average(self.V(x, y, alpha), weights=w), 0, + atol=1 / n / np.amin([alpha, 1 - alpha])) + + @pytest.mark.parametrize("weights", [False, True]) + @pytest.mark.parametrize("method", quantile_methods) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_add_and_multiply_constant(self, weights, method, alpha): + # Test that + # 1. quantile(c + x) = c + quantile(x) + # 2. quantile(c * x) = c * quantile(x) + # 3. quantile(-x) = -quantile(x, 1 - alpha) + # On empirical quantiles, this equation does not hold exactly.
+ # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these + # properties equivariance. + if weights and method not in methods_supporting_weights: + pytest.skip("Weights not supported by method.") + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n) if weights else None + q = np.quantile(y, alpha, method=method, weights=w) + c = 13.5 + + # 1 + assert_allclose(np.quantile(c + y, alpha, method=method, weights=w), + c + q) + # 2 + assert_allclose(np.quantile(c * y, alpha, method=method, weights=w), + c * q) + # 3 + if weights: + # From here on, we would need more methods to support weights. + return + q = -np.quantile(-y, 1 - alpha, method=method) + if method == "inverted_cdf": + if ( + n * alpha == int(n * alpha) + or np.round(n * alpha) == int(n * alpha) + 1 + ): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "closest_observation": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif np.round(n * alpha) == int(n * alpha) + 1: + assert_allclose( + q, np.quantile(y, alpha + 1/n, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "interpolated_inverted_cdf": + assert_allclose(q, np.quantile(y, alpha + 1/n, method=method)) + elif method == "nearest": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha + 1/n, method=method)) + else: + assert_allclose(q, np.quantile(y, alpha, method=method)) + elif method == "lower": + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif method == "higher": + assert_allclose(q, np.quantile(y, alpha, method="lower")) + else: + # "averaged_inverted_cdf", "hazen", "weibull", "linear", + # "median_unbiased", "normal_unbiased", "midpoint" + assert_allclose(q, np.quantile(y, alpha, method=method)) + + @pytest.mark.parametrize("method", methods_supporting_weights) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_constant_weights(self, method, alpha): + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + q = np.quantile(y, alpha, method=method) + + w = np.ones_like(y) + qw = np.quantile(y, alpha, method=method, weights=w) + assert_allclose(qw, q) + + w = 8.125 * np.ones_like(y) + qw = np.quantile(y, alpha, method=method, weights=w) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + @pytest.mark.parametrize("alpha", [0, 0.2, 0.5, 0.9, 1]) + def test_quantile_with_integer_weights(self, method, alpha): + # Integer weights can be interpreted as repeated observations. + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51.
, 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n, dtype=np.int32) + + qw = np.quantile(y, alpha, method=method, weights=w) + q = np.quantile(np.repeat(y, w), alpha, method=method) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_with_weights_and_axis(self, method): + rng = np.random.default_rng(4321) + + # 1d weight and single alpha + y = rng.random((2, 10, 3)) + w = np.abs(rng.random(10)) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 1d alpha + alpha = [0, 0.2, 0.4, 0.6, 0.8, 1] # shape (6,) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(6, 2, 3)) + for i in range(2): + for j in range(3): + q_res[:, i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 2d alpha + alpha = [[0, 0.2], [0.4, 0.6], [0.8, 1]] # shape (3, 2) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = q_res.reshape((3, 2, 2, 3)) + assert_allclose(q, q_res) + + # shape of weights equals shape of y + w = np.abs(rng.random((2, 10, 3))) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w[i, :, j] + ) + assert_allclose(q, q_res) + + def test_quantile_weights_raises_negative_weights(self): + y = [1, 2] + w = [-0.5, 1] + with pytest.raises(ValueError, match="Weights must be non-negative"): + np.quantile(y, 0.5, weights=w, method="inverted_cdf") + + @pytest.mark.parametrize( + "method", + sorted(set(quantile_methods) - set(methods_supporting_weights)), + ) + def test_quantile_weights_raises_unsupported_methods(self, method): + y = [1, 2] + w = [0.5, 1] + msg = "Only method 'inverted_cdf' supports weights" + with pytest.raises(ValueError, match=msg): + np.quantile(y, 0.5, weights=w, method=method) + + def test_weibull_fraction(self): + arr = [Fraction(0, 1), Fraction(1, 10)] + quantile = np.quantile(arr, [0, ], method='weibull') + assert_equal(quantile, np.array(Fraction(0, 1))) + quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') + assert_equal(quantile, np.array(Fraction(1, 20))) + + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + + +class TestLerp: + @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + t1=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a = st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b = st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_monotonic(self, 
t0, t1, a, b): + l0 = nfb._lerp(a, b, t0) + l1 = nfb._lerp(a, b, t1) + if t0 == t1 or a == b: + assert l0 == l1 # uninteresting + elif (t0 < t1) == (a < b): + assert l0 <= l1 + else: + assert l0 >= l1 + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_bounded(self, t, a, b): + if a <= b: + assert a <= nfb._lerp(a, b, t) <= b + else: + assert b <= nfb._lerp(a, b, t) <= a + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_symmetric(self, t, a, b): + # double subtraction is needed to remove the extra precision of t < 0.5 + left = nfb._lerp(a, b, 1 - (1 - t)) + right = nfb._lerp(b, a, 1 - t) + assert_allclose(left, right) + + def test_linear_interpolation_formula_0d_inputs(self): + a = np.array(2) + b = np.array(5) + t = np.array(0.2) + assert nfb._lerp(a, b, t) == 2.6 + + +class TestMedian: + + def test_basic(self): + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), + [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), + [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + 
np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + @pytest.mark.parametrize('arr', + ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.)) + def test_subclass2(self, arr): + """Check that we return subclasses, even if a NaN scalar.""" + class MySubclass(np.ndarray): + pass + + m = np.median(np.array(arr).view(MySubclass)) + assert isinstance(m, MySubclass) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 0), b) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 1), b) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.median(a, (0, 2)), b) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly") + def test_empty(self): + # mean(empty array) emits two warnings: empty slice and divide by 0 + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + 
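+ # Editorial note: only axis=2 reduces the zero-length dimension of the
+ # (1, 1, 0) input, so only it should divide by zero and warn; the
+ # axis=0/1 cases above return empty arrays without touching any data.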
assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) + assert_(type(np.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.median(o.astype(object))), float) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.median(x, axis=(0, 1)), np.median(o)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.median(x, axis=(0, -1)), np.median(o)) + + assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) + assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) + assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.median(d, axis=(0, 1, 2))[0], + np.median(d[:,:,:, 0].flatten())) + assert_equal(np.median(d, axis=(0, 1, 3))[1], + np.median(d[:,:, 1,:].flatten())) + assert_equal(np.median(d, axis=(3, 1, -4))[2], + np.median(d[:,:, 2,:].flatten())) + assert_equal(np.median(d, axis=(3, 1, 2))[2], + np.median(d[2,:,:,:].flatten())) + assert_equal(np.median(d, axis=(3, 2))[2, 1], + np.median(d[2, 1,:,:].flatten())) + assert_equal(np.median(d, axis=(1, -2))[2, 1], + np.median(d[2,:,:, 1].flatten())) + assert_equal(np.median(d, axis=(1, 3))[2, 2], + np.median(d[2,:, 2,:].flatten())) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.median, d, axis=-5) + assert_raises(AxisError, np.median, d, axis=(0, -5)) + assert_raises(AxisError, np.median, d, axis=4) + assert_raises(AxisError, np.median, d, axis=(0, 4)) + assert_raises(ValueError, np.median, d, axis=(1, 1)) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.median(d, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.median(d, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("dtype", ["m8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_behavior(self, dtype, pos): + # TODO: Median does not support Datetime, due to `mean`. + # NaT and NaN should behave the same, do basic tests for NaT. 
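+ # Editorial note: i.e. a single NaT is expected to propagate to the
+ # result exactly as a NaN does for floats, both for the full reduction
+ # and per column along an axis.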
+ a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.median(a) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.median(a, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +class TestSortComplex: + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_histograms.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_histograms.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d33209cfb3ea5a95f47dda285206877c56d654 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_histograms.py @@ -0,0 +1,813 @@ +import numpy as np + +from numpy import histogram, histogramdd, histogram_bin_edges +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose, + assert_array_max_ulp, assert_raises_regex, suppress_warnings, + ) +from numpy.testing._private.utils import requires_memory +import pytest + + +class TestHistogram: + + def setup_method(self): + pass + + def teardown_method(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. 
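+ # Worked example (editorial): density normalizes by total count times
+ # bin width, density[i] = counts[i] / (N * widths[i]). For data
+ # [1, 2, 3, 4] and bins [0.5, 1.5, inf] the raw counts are [1, 3], so
+ # the first bin gives 1 / (4 * 1.0) = 0.25 and the infinite-width bin
+ # gives 3 / (4 * inf) = 0.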
+ counts, dmy = np.histogram( + [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) + assert_equal(counts, [.25, 0]) + + def test_outliers(self): + # Check that outliers are not tallied + a = np.arange(10) + .5 + + # Lower outliers + h, b = histogram(a, range=[0, 9]) + assert_equal(h.sum(), 9) + + # Upper outliers + h, b = histogram(a, range=[1, 10]) + assert_equal(h.sum(), 9) + + # Normalization + h, b = histogram(a, range=[1, 9], density=True) + assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15) + + # Weights + w = np.arange(10) + .5 + h, b = histogram(a, range=[1, 9], weights=w, density=True) + assert_equal((h * np.diff(b)).sum(), 1) + + h, b = histogram(a, bins=8, range=[1, 9], weights=w) + assert_equal(h, w[1:-1]) + + def test_arr_weights_mismatch(self): + a = np.arange(10) + .5 + w = np.arange(11) + .5 + with assert_raises_regex(ValueError, "same shape as"): + h, b = histogram(a, range=[1, 9], weights=w, density=True) + + + def test_type(self): + # Check the type of the returned histogram + a = np.arange(10) + .5 + h, b = histogram(a) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, density=True) + assert_(np.issubdtype(h.dtype, np.floating)) + + h, b = histogram(a, weights=np.ones(10, int)) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, weights=np.ones(10, float)) + assert_(np.issubdtype(h.dtype, np.floating)) + + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.) + + def test_bool_conversion(self): + # gh-12107 + # Reference integer histogram + a = np.array([1, 1, 0], dtype=np.uint8) + int_hist, int_edges = np.histogram(a) + + # Should raise a warning on booleans + # Ensure that the histograms are equivalent, need to suppress + # the warnings to get the actual outputs + with suppress_warnings() as sup: + rec = sup.record(RuntimeWarning, 'Converting input from .*') + hist, edges = np.histogram([True, True, False]) + # A warning should be issued + assert_equal(len(rec), 1) + assert_array_equal(hist, int_hist) + assert_array_equal(edges, int_edges) + + def test_weights(self): + v = np.random.rand(100) + w = np.ones(100) * 5 + a, b = histogram(v) + na, nb = histogram(v, density=True) + wa, wb = histogram(v, weights=w) + nwa, nwb = histogram(v, weights=w, density=True) + assert_array_almost_equal(a * 5, wa) + assert_array_almost_equal(na, nwa) + + # Check weights are properly applied. + v = np.linspace(0, 10, 10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa, wb = histogram(v, bins=np.arange(11), weights=w) + assert_array_almost_equal(wa, w) + + # Check with integer weights + wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) + assert_array_equal(wa, [4, 5, 0, 1]) + wa, wb = histogram( + [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True) + assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) + + # Check weights with non-uniform bin widths + a, b = histogram( + np.arange(9), [0, 1, 3, 6, 10], + weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) + assert_almost_equal(a, [.2, .1, .1, .075]) + + def test_exotic_weights(self): + + # Test the use of weights that are not integer or floats, but e.g. + # complex numbers or object types.
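+ # Editorial note: the technique below relies on np.histogram summing the
+ # weights array in its own dtype, so each bin total keeps the weights'
+ # arithmetic (complex addition first, Decimal addition further down).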
+ + # Complex weights + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Decimal weights + from decimal import Decimal + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + def test_no_side_effects(self): + # This is a regression test that ensures that values passed to + # ``histogram`` are unchanged. + values = np.array([1.3, 2.5, 2.3]) + np.histogram(values, range=[-10, 10], bins=100) + assert_array_almost_equal(values, [1.3, 2.5, 2.3]) + + def test_empty(self): + a, b = histogram([], bins=[0, 1]) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_error_binnum_type(self): + # Tests that the right error is raised if the bins argument is a float + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, 5) + assert_raises(TypeError, histogram, vals, 2.4) + + def test_finite_range(self): + # Normal ranges should be fine + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, range=[0.25, 0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25, np.inf]) + + def test_invalid_range(self): + # start of range must be < end of range + vals = np.linspace(0.0, 1.0, num=100) + with assert_raises_regex(ValueError, "max must be larger than"): + np.histogram(vals, range=[0.1, 0.01]) + + def test_bin_edge_cases(self): + # Ensure that floating-point computations correctly place edge cases.
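+ # Editorial note: the invariant checked below is the half-open binning
+ # rule, left_edge <= x < right_edge for every occupied bin (only the
+ # final bin is closed on the right, and range=(2, 2280) keeps these
+ # samples away from it).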
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + sup = suppress_warnings() + sup.filter(RuntimeWarning) + with sup: + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 7, 20]) + dates = begin + offsets + date_bins = begin + bins + + td = np.dtype('timedelta64[D]') + + # Results should be the same for integer offsets or datetime values. 
+ # For now, only explicit bins are supported, since linspace does not + # work on datetimes or timedeltas + d_count, d_edge = histogram(dates, bins=date_bins) + t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) + i_count, i_edge = histogram(offsets, bins=bins) + + assert_equal(d_count, i_count) + assert_equal(t_count, i_count) + + assert_equal((d_edge - begin).astype(int), i_edge) + assert_equal(t_edge.astype(int), i_edge) + + assert_equal(d_edge.dtype, dates.dtype) + assert_equal(t_edge.dtype, td) + + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + + def do_precision_lower_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([1.0 + eps, 2.0], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[0] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + assert_equal(x_loc.dtype, float_large) + + def do_precision_upper_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([0.0, 1.0 - eps], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[-1] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + + assert_equal(x_loc.dtype, float_large) + + def do_precision(self, float_small, float_large): + self.do_precision_lower_bound(float_small, float_large) + self.do_precision_upper_bound(float_small, float_large) + + def test_precision(self): + # not looping results in a useful stack trace upon failure + self.do_precision(np.half, np.single) + self.do_precision(np.half, np.double) + self.do_precision(np.half, np.longdouble) + self.do_precision(np.single, np.double) + self.do_precision(np.single, np.longdouble) + self.do_precision(np.double, np.longdouble) + + def test_histogram_bin_edges(self): + hist, e = histogram([1, 2, 3, 4], [1, 2]) + edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) + assert_array_equal(edges, e) + + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, e = histogram(arr, bins=30, range=(-0.5, 5)) + edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) + assert_array_equal(edges, e) + + hist, e = histogram(arr, bins='auto', range=(0, 1)) + edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) + assert_array_equal(edges, e) + + # @requires_memory(free_bytes=1e10) + # @pytest.mark.slow + @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") + def test_big_arrays(self): + sample = np.zeros([100000000, 3]) + xbins = 400 + ybins = 400 + zbins = np.arange(16000) + hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins)) + assert_equal(type(hist), type((1, 2))) + + def test_gh_23110(self): + hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'), + bins=2, + range=(-1e-308, -2e-313)) + expected_hist = np.array([1, 0]) 
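+        # (Our reading of gh-23110: both range endpoints above are subnormal
+        # float64 values and the data is stored big-endian ('>f8'); the
+        # regression exercised exactly that combination.)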
+        assert_array_equal(hist, expected_hist)
+
+
+class TestHistogramOptimBinNums:
+    """
+    Provide test coverage for the provided estimators of the optimal number
+    of bins.
+    """
+
+    def test_empty(self):
+        estimator_list = ['fd', 'scott', 'rice', 'sturges',
+                          'doane', 'sqrt', 'auto', 'stone']
+        # check it can deal with empty data
+        for estimator in estimator_list:
+            a, b = histogram([], bins=estimator)
+            assert_array_equal(a, np.array([0]))
+            assert_array_equal(b, np.array([0, 1]))
+
+    def test_simple(self):
+        """
+        Straightforward testing with a mixture of linspace data (for
+        consistency). All test values have been precomputed and the values
+        shouldn't change.
+        """
+        # Some basic sanity checking, with some fixed data.
+        # Checking for the correct number of bins
+        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
+                           'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
+                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
+                            'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
+                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
+                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
+
+        for testlen, expectedResults in basic_test.items():
+            # Create some sort of non uniform data to test with
+            # (2 peak uniform mixture)
+            x1 = np.linspace(-10, -1, testlen // 5 * 2)
+            x2 = np.linspace(1, 10, testlen // 5 * 3)
+            x = np.concatenate((x1, x2))
+            for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator)
+                assert_equal(len(a), numbins, err_msg="For the {0} estimator "
+                             "with datasize of {1}".format(estimator, testlen))
+
+    def test_small(self):
+        """
+        Smaller datasets have the potential to cause issues with the data
+        adaptive methods, especially the FD method. All bin numbers have been
+        precalculated.
+        """
+        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+                         'doane': 1, 'sqrt': 1, 'stone': 1},
+                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
+                         'doane': 1, 'sqrt': 2, 'stone': 1},
+                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
+                         'doane': 3, 'sqrt': 2, 'stone': 1}}
+
+        for testlen, expectedResults in small_dat.items():
+            testdat = np.arange(testlen)
+            for estimator, expbins in expectedResults.items():
+                a, b = np.histogram(testdat, estimator)
+                assert_equal(len(a), expbins, err_msg="For the {0} estimator "
+                             "with datasize of {1}".format(estimator, testlen))
+
+    def test_incorrect_methods(self):
+        """
+        Check that a ValueError is raised when an unknown string is passed in.
+        """
+        check_list = ['mad', 'freeman', 'histograms', 'IQR']
+        for estimator in check_list:
+            assert_raises(ValueError, histogram, [1, 2, 3], estimator)
+
+    def test_novariance(self):
+        """
+        Check that methods handle data with no variance.
+        Primarily for Scott and FD, as the SD and IQR are both 0 in this case.
+        """
+        novar_dataset = np.ones(100)
+        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
+
+        for estimator, numbins in novar_resultdict.items():
+            a, b = np.histogram(novar_dataset, estimator)
+            assert_equal(len(a), numbins, err_msg="{0} estimator, "
+                         "No Variance test".format(estimator))
+
+    def test_limited_variance(self):
+        """
+        Check that when the IQR is 0 but variance exists, we return the
+        Sturges value and not the FD value.
+        """
+        lim_var_data = np.ones(1000)
+        lim_var_data[:3] = 0
+        lim_var_data[-4:] = 100
+
+        edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+        assert_equal(edges_auto, np.linspace(0, 100, 12))
+
+        edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+        assert_equal(edges_fd, np.array([0, 100]))
+
+        edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+        assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
+    def test_outlier(self):
+        """
+        Check the FD, Scott and Doane estimators with outliers.
+
+        The FD estimator picks a smaller binwidth since it's less affected by
+        outliers. Since the range is so (artificially) large, this means more
+        bins, most of which will be empty, but the data of interest usually is
+        unaffected. The Scott estimator is more affected and returns fewer
+        bins, despite most of the variance being in one area of the data. The
+        Doane estimator lies somewhere between the other two.
+        """
+        xcenter = np.linspace(-10, 10, 50)
+        outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
+
+        outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
+
+        for estimator, numbins in outlier_resultdict.items():
+            a, b = np.histogram(outlier_dataset, estimator)
+            assert_equal(len(a), numbins)
+
+    def test_scott_vs_stone(self):
+        """Verify that Scott's rule and Stone's rule converge for normally
+        distributed data."""
+
+        def nbins_ratio(seed, size):
+            rng = np.random.RandomState(seed)
+            x = rng.normal(loc=0, scale=2, size=size)
+            a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
+            return a / (a + b)
+
+        ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
+              for seed in range(10)]
+
+        # the average difference between the two methods decreases as the
+        # dataset size increases
+        avg = abs(np.mean(ll, axis=0) - 0.5)
+        assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
+
+    def test_simple_range(self):
+        """
+        Straightforward testing with a mixture of linspace data (for
+        consistency). Adding in a 3rd mixture that will then be
+        completely ignored. All test values have been precomputed and
+        they shouldn't change.
+        """
+        # some basic sanity checking, with some fixed data.
+        # Checking for the correct number of bins
+        basic_test = {
+            50: {'fd': 8, 'scott': 8, 'rice': 15,
+                 'sturges': 14, 'auto': 14, 'stone': 8},
+            500: {'fd': 15, 'scott': 16, 'rice': 32,
+                  'sturges': 20, 'auto': 20, 'stone': 80},
+            5000: {'fd': 33, 'scott': 33, 'rice': 69,
+                   'sturges': 27, 'auto': 33, 'stone': 80}
+        }
+
+        for testlen, expectedResults in basic_test.items():
+            # create some sort of non uniform data to test with
+            # (3 peak uniform mixture)
+            x1 = np.linspace(-10, -1, testlen // 5 * 2)
+            x2 = np.linspace(1, 10, testlen // 5 * 3)
+            x3 = np.linspace(-100, -50, testlen)
+            x = np.hstack((x1, x2, x3))
+            for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator, range=(-20, 20))
+                msg = "For the {0} estimator".format(estimator)
+                msg += " with datasize of {0}".format(testlen)
+                assert_equal(len(a), numbins, err_msg=msg)
+
+    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+                                      'stone', 'rice', 'sturges'])
+    def test_signed_integer_data(self, bins):
+        # Regression test for gh-14379.
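+        # Note (ours): the spread here, 127 - (-2) = 129, does not fit in
+        # int8, so a naive in-dtype range computation could overflow; the
+        # int32 run below serves as the reference result.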
+ a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) + + +class TestHistogramdd: + + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, density=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + density=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. + z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = np.random.rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. 
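+        # All 24 orderings of (4, 5, 6, 7); H.shape must match the per-axis
+        # bin counts in exactly the order given (our note).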
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = np.random.rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = np.random.rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, density=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + # There are two ways to specify bins. Check for the right errors + # when mixing those. + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + # Test using +/-inf bin edges works. See #1788. + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + # Test event very close to rightmost binedge. See Github issue #4266 + x = [0.9999999995] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) 
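+        # The rightmost edge is inclusive, so exactly 1.0 still lands in the
+        # last bin, while anything strictly beyond it (below) is an outlier
+        # and is dropped (our note).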
+ x = [1.0000000001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + x = [1.0001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + def test_finite_range(self): + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) + + def test_density_non_uniform_2d(self): + # Defines the following grid: + # + # 0 2 8 + # 0+-+-----+ + # + | + + # + | + + # 6+-+-----+ + # 8+-+-----+ + x_edges = np.array([0, 2, 8]) + y_edges = np.array([0, 6, 8]) + relative_areas = np.array([ + [3, 9], + [1, 3]]) + + # ensure the number of points in each region is proportional to its area + x = np.array([1] + [1]*3 + [7]*3 + [7]*9) + y = np.array([7] + [1]*3 + [7]*3 + [1]*9) + + # sanity check that the above worked as intended + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) + assert_equal(hist, relative_areas) + + # resulting histogram should be uniform, since counts and areas are proportional + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) + assert_equal(hist, 1 / (8*8)) + + def test_density_non_uniform_1d(self): + # compare to histogram to show the results are the same + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_index_tricks.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..be2e0e6e72bb76297606f4db09ae7aa364ef9a49 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,553 @@ +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_raises_regex, + ) +from numpy.lib._index_tricks_impl import ( + mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, + index_exp, ndindex, c_, r_, s_, ix_ + ) + + +class TestRavelUnravelIndex: + def test_basic(self): + 
assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # test that new shape argument works properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled, including the old name `dims`. + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_empty_indices(self): + msg1 = 'indices must be integral: the provided empty sequence was' + msg2 = 'only int indices permitted' + assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) + assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) + assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), + (10, 3, 5)) + assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)), + [[], [], []]) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), + (10, 3)) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), + (10, 3)) + assert_raises_regex(TypeError, msg2, np.ravel_multi_index, + (np.array([]), np.array([])), (5, 3)) + assert_equal(np.ravel_multi_index( + (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) + assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), + (5, 3)), []) + + def test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test unravel_index for big indices (issue #9538) + assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1)) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0],[0]) + half_max = np.iinfo(np.intp).max // 2 + 
assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8*coords[0]+coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10*(8*coords[0]+coords[1])+coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*(coords[1]+8*coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # See gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert(res.shape == (0,)) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert(len(res) == 3) + assert(all(a.shape == (0,) for a in res)) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) + +class TestGrid: + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1]-b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0]+19*0.1, 11) + assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=True) + assert_almost_equal(st, 8/49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], 
-np.ones(10, 'd'))
+        assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd'))
+        assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
+        assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11)
+        assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
+                                  0.1*np.ones(20, 'd'), 11)
+        assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
+                                  0.2*np.ones(20, 'd'), 11)
+
+    def test_sparse(self):
+        grid_full = mgrid[-1:1:10j, -2:2:10j]
+        grid_sparse = ogrid[-1:1:10j, -2:2:10j]
+
+        # sparse grids can be made dense by broadcasting
+        grid_broadcast = np.broadcast_arrays(*grid_sparse)
+        for f, b in zip(grid_full, grid_broadcast):
+            assert_equal(f, b)
+
+    @pytest.mark.parametrize("start, stop, step, expected", [
+        (None, 10, 10j, (200, 10)),
+        (-10, 20, None, (1800, 30)),
+    ])
+    def test_mgrid_size_none_handling(self, start, stop, step, expected):
+        # regression test None value handling for
+        # start and step values used by mgrid;
+        # internally, this aims to cover previously
+        # unexplored code paths in nd_grid()
+        grid = mgrid[start:stop:step, start:stop:step]
+        # need a smaller grid to explore one of the
+        # untested code paths
+        grid_small = mgrid[start:stop:step]
+        assert_equal(grid.size, expected[0])
+        assert_equal(grid_small.size, expected[1])
+
+    def test_accepts_npfloating(self):
+        # regression test for #16466
+        grid64 = mgrid[0.1:0.33:0.1, ]
+        grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ]
+        assert_array_almost_equal(grid64, grid32)
+        # At some point this was float64, but NEP 50 changed it:
+        assert grid32.dtype == np.float32
+        assert grid64.dtype == np.float64
+
+        # different code path for single slice
+        grid64 = mgrid[0.1:0.33:0.1]
+        grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)]
+        assert_(grid32.dtype == np.float64)
+        assert_array_almost_equal(grid64, grid32)
+
+    def test_accepts_longdouble(self):
+        # regression tests for #16945
+        grid64 = mgrid[0.1:0.33:0.1, ]
+        grid128 = mgrid[
+            np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1),
+        ]
+        assert_(grid128.dtype == np.longdouble)
+        assert_array_almost_equal(grid64, grid128)
+
+        grid128c_a = mgrid[0:np.longdouble(1):3.4j]
+        grid128c_b = mgrid[0:np.longdouble(1):3.4j, ]
+        assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble)
+        assert_array_equal(grid128c_a, grid128c_b[0])
+
+        # different code path for single slice
+        grid64 = mgrid[0.1:0.33:0.1]
+        grid128 = mgrid[
+            np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1)
+        ]
+        assert_(grid128.dtype == np.longdouble)
+        assert_array_almost_equal(grid64, grid128)
+
+    def test_accepts_npcomplexfloating(self):
+        # Related to #16466
+        assert_array_almost_equal(
+            mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ]
+        )
+
+        # different code path for single slice
+        assert_array_almost_equal(
+            mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)]
+        )
+
+        # Related to #16945
+        grid64_a = mgrid[0.1:0.3:3.3j]
+        grid64_b = mgrid[0.1:0.3:3.3j, ][0]
+        assert_(grid64_a.dtype == grid64_b.dtype == np.float64)
+        assert_array_equal(grid64_a, grid64_b)
+
+        grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)]
+        grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0]
+        assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble)
+        # Compare the longdouble grids here (the original repeated the
+        # grid64 comparison above, leaving the grid128 results unchecked).
+        assert_array_equal(grid128_a, grid128_b)
+
+
+class TestConcatenator:
+    def test_1d(self):
+        assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
+        b = np.ones(5)
+        c = r_[b, 0, 0, b]
+        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
+
+    def test_mixed_type(self):
+        g = r_[10.1, 1:10]
+        assert_(g.dtype == 'f8')
+
+    
def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + # Related to #16466 + g = r_[0:36:np.complex64(100j)] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate: + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression: + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_: + def test_regression_1(self): + # Test empty untyped inputs create outputs of indexing type, gh-5804 + a, = np.ix_(range(0)) + assert_equal(a.dtype, np.intp) + + a, = np.ix_([]) + assert_equal(a.dtype, np.intp) + + # but if the type is specified, don't change it + a, = np.ix_(np.array([], dtype=np.float32)) + assert_equal(a.dtype, np.float32) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector = 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal: + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + 
assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3,3,7,3), int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom: + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_io.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_io.py new file mode 100644 index 0000000000000000000000000000000000000000..a340cac35efb328d4d1629947940758b03ad6206 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_io.py @@ -0,0 +1,2801 @@ +import sys +import gc +import gzip +import os +import threading +import time +import warnings +import re +import pytest +from pathlib import Path +from tempfile import NamedTemporaryFile +from io import BytesIO, StringIO +from datetime import datetime +import locale +from multiprocessing import Value, get_context +from ctypes import c_bool + +import numpy as np +import numpy.ma as ma +from numpy.exceptions import VisibleDeprecationWarning +from numpy.lib._iotools import ConverterError, ConversionWarning +from numpy.lib import _npyio_impl +from numpy.lib._npyio_impl import recfromcsv, recfromtxt +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + assert_warns, assert_, assert_raises_regex, assert_raises, + assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, + HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings, + break_cycles, IS_WASM + ) +from 
numpy.testing._private.utils import requires_memory +from numpy._utils import asbytes + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. + + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if type(s) == bytes: + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + 
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + @pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] # Should succeed + npfile.close() + del a # Avoid pyflakes unused variable warning. + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + + def test_tuple_getitem_raises(self): + # gh-23748 + a = np.array([1, 2, 3]) + f = BytesIO() + np.savez(f, a=a) + f.seek(0) + l = np.load(f) + with pytest.raises(KeyError, match="(1, 2)"): + l[1, 2] + + def test_BagObj(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(sorted(dir(l.f)), ['file_a','file_b']) + assert_equal(a, l.f.file_a) + assert_equal(b, l.f.file_b) + + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def writer(error_list): + with temppath(suffix='.npz') as tmp: + arr = np.random.randn(500, 500) + try: + np.savez(tmp, arr=arr) + except OSError as err: + error_list.append(err) + + errors = [] + threads = [threading.Thread(target=writer, args=(errors,)) + for j in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + if errors: + raise AssertionError(errors) + + def test_not_closing_opened_fid(self): + # Test that issue #2178 is fixed: + # verify could seek on 'loaded' file + with temppath(suffix='.npz') as tmp: + with open(tmp, 'wb') as fp: + np.savez(fp, data='LOVELY LOAD') + with open(tmp, 'rb', 10000) as fp: + fp.seek(0) + 
assert_(not fp.closed)
+                np.load(fp)['data']
+                # fp must not get closed by .load
+                assert_(not fp.closed)
+                fp.seek(0)
+                assert_(not fp.closed)
+
+    @pytest.mark.slow_pypy
+    def test_closing_fid(self):
+        # Test that issue #1517 (too many opened files) stays fixed.
+        # It might be a "weak" test since it failed to get triggered on
+        # e.g. Debian sid of 2012 Jul 05, but was reported to
+        # trigger the failure on Ubuntu 10.04:
+        # http://projects.scipy.org/numpy/ticket/1517#comment:2
+        with temppath(suffix='.npz') as tmp:
+            np.savez(tmp, data='LOVELY LOAD')
+            # We need to check if the garbage collector can properly close
+            # the numpy npz files returned by np.load when their reference
+            # count goes to zero. Python 3 running in debug mode raises a
+            # ResourceWarning when file closing is left to the garbage
+            # collector, so we catch the warnings.
+            with suppress_warnings() as sup:
+                sup.filter(ResourceWarning)  # TODO: specify exact message
+                for i in range(1, 1025):
+                    try:
+                        np.load(tmp)["data"]
+                    except Exception as e:
+                        msg = "Failed to load data from a file: %s" % e
+                        raise AssertionError(msg)
+                    finally:
+                        if IS_PYPY:
+                            gc.collect()
+
+    def test_closing_zipfile_after_load(self):
+        # Check that the zipfile owns the file and can close it. This needs
+        # to pass a file name to load for the test. On Windows, failure will
+        # cause a second error to be raised when the attempt to remove the
+        # open file is made.
+        prefix = 'numpy_test_closing_zipfile_after_load_'
+        with temppath(suffix='.npz', prefix=prefix) as tmp:
+            np.savez(tmp, lab='place holder')
+            data = np.load(tmp)
+            fp = data.zip.fp
+            data.close()
+            assert_(fp.closed)
+
+    @pytest.mark.parametrize("count, expected_repr", [
+        (1, "NpzFile {fname!r} with keys: arr_0"),
+        (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+        # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+        # expected to end in '...'
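+        # (Our note: NpzFile lists its keys from the zip directory without
+        # loading the arrays themselves, so building this repr stays cheap
+        # even for large archives.)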
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."), + ]) + def test_repr_lists_keys(self, count, expected_repr): + a = np.array([[1, 2], [3, 4]], float) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, *[a]*count) + l = np.load(tmp) + assert repr(l) == expected_repr.format(fname=tmp) + l.close() + + +class TestSaveTxt: + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_structured(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_structured_padded(self): + # gh-13297 + a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[ + ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') + ]) + c = BytesIO() + np.savetxt(c, a[['foo', 'baz']], fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 3\n', b'4 6\n']) + + def test_multifield_view(self): + a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) + v = a[['x', 'z']] + with temppath(suffix='.npy') as path: + path = Path(path) + np.save(path, v) + data = np.load(path) + assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. 
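+        # Reminder (ours): savetxt prefixes each header/footer line with the
+        # ``comments`` string ('# ' by default), which is what lets loadtxt
+        # skip these lines when reading the file back.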
+ + c = BytesIO() + a = np.array([(1, 2), (3, 4)], dtype=int) + test_header_footer = 'Test header / footer' + # Test the header keyword argument + np.savetxt(c, a, fmt='%1d', header=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) + # Test the footer keyword argument + c = BytesIO() + np.savetxt(c, a, fmt='%1d', footer=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) + # Test the commentstr keyword argument used on the header + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + header=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) + # Test the commentstr keyword argument used on the footer + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + footer=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_file_roundtrip(self, filename_type): + with temppath() as name: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(filename_type(name), a) + b = np.loadtxt(filename_type(name)) + assert_array_equal(a, b) + + def test_complex_arrays(self): + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re + 1.0j * im + + # One format only + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', + b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) + + # One format for each real and imaginary part + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', + b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) + + # One format for each complex number + c = BytesIO() + np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', + b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) + + def test_complex_negative_exponent(self): + # Previous to 1.15, some formats generated x+-yj, gh 7895 + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n', + b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n']) + + + def test_custom_writer(self): + + class CustomWriter(list): + def write(self, text): + self.extend(text.split(b'\n')) + + w = CustomWriter() + a = np.array([(1, 2), (3, 4)]) + np.savetxt(w, a) + b = np.loadtxt(w) + assert_array_equal(a, b) + + def test_unicode(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + with tempdir() as tmpdir: + # set encoding as on windows it may not be unicode even on py3 + np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], + encoding='UTF-8') + + def test_unicode_roundtrip(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + # our gz wrapper support encoding + suffixes = ['', '.gz'] + if HAS_BZ2: + suffixes.append('.bz2') + if HAS_LZMA: + suffixes.extend(['.xz', '.lzma']) + with 
tempdir() as tmpdir:
+            for suffix in suffixes:
+                np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
+                           fmt=['%s'], encoding='UTF-16-LE')
+                b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
+                               encoding='UTF-16-LE', dtype=np.str_)
+                assert_array_equal(a, b)
+
+    def test_unicode_bytestream(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        s = BytesIO()
+        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+        s.seek(0)
+        assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
+
+    def test_unicode_stringstream(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        s = StringIO()
+        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+        s.seek(0)
+        assert_equal(s.read(), utf8 + '\n')
+
+    @pytest.mark.parametrize("iotype", [StringIO, BytesIO])
+    def test_unicode_and_bytes_fmt(self, iotype):
+        # string type of fmt should not matter, see also gh-4053
+        a = np.array([1.])
+        s = iotype()
+        np.savetxt(s, a, fmt="%f")
+        s.seek(0)
+        if iotype is StringIO:
+            assert_equal(s.read(), "%f\n" % 1.)
+        else:
+            assert_equal(s.read(), b"%f\n" % 1.)
+
+    @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work")
+    @pytest.mark.slow
+    @requires_memory(free_bytes=7e9)
+    def test_large_zip(self):
+        def check_large_zip(memoryerror_raised):
+            memoryerror_raised.value = False
+            try:
+                # The test takes at least 6GB of memory and writes a file
+                # larger than 4GB. This tests the ``allowZip64`` kwarg to
+                # ``zipfile``.
+                test_data = np.asarray([np.random.rand(
+                                        np.random.randint(50, 100), 4)
+                                        for i in range(800000)], dtype=object)
+                with tempdir() as tmpdir:
+                    np.savez(os.path.join(tmpdir, 'test.npz'),
+                             test_data=test_data)
+            except MemoryError:
+                memoryerror_raised.value = True
+                raise
+        # run in a subprocess to ensure memory is released on PyPy, see gh-15775
+        # Use an object in shared memory to re-raise the MemoryError exception
+        # in our process if needed, see gh-16889
+        memoryerror_raised = Value(c_bool)
+
+        # Since Python 3.8, the default start method for multiprocessing on
+        # macOS has been 'spawn' rather than 'fork', which changes the memory
+        # sharing model and led to failures in check_large_zip, so force
+        # 'fork' here.
+        ctx = get_context('fork')
+        p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
+        p.start()
+        p.join()
+        if memoryerror_raised.value:
+            raise MemoryError("Child process raised a MemoryError exception")
+        # -9 indicates a SIGKILL, probably an OOM.
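+        # (Our note: multiprocessing reports death-by-signal as a negative
+        # exitcode, -N for signal N, hence the -9 == SIGKILL check below.)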
+ if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + assert p.exitcode == 0 + +class LoadTxtBase: + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") + def test_compressed_bz2(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") + def test_compressed_lzma(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_) + assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.str_, encoding="bytes", + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.str_, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setup_method(self): + # lower chunksize for testing + self.orig_chunk = _npyio_impl._loadtxt_chunksize + _npyio_impl._loadtxt_chunksize = 1 + + def teardown_method(self): + _npyio_impl._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64 75.0\nF 25 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = 
np.array([[1, 2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. 
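+ # (usecols only needs a sequence of column indices, so an ndarray works + # just as well as a tuple here.)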
+ c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt: + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx).__name__, + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx).__name__, + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_bad_usecols(self): + with pytest.raises(OverflowError): + np.loadtxt(["1\n"], usecols=[2**64], delimiter=",") + with pytest.raises((ValueError, OverflowError)): + # Overflow error on 32bit platforms + np.loadtxt(["1\n"], usecols=[2**62], delimiter=",") + with pytest.raises(TypeError, + match="If a structured dtype .*. But 1 usecols were given and " + "the number of fields is 3."): + np.loadtxt(["1,1\n"], dtype="i,2i", usecols=[0], delimiter=",") + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with pytest.warns(UserWarning, match="input contained no data"): + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + 
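+ # (0x21 == 33 and 0x42 == 66, which is why applying the hex converter to + # column 1 turns ['21', '42'] into [33, 66] above.)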
def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt( + c, dtype=dt, converters=float.fromhex, encoding="latin1") + assert_equal(res, tgt, err_msg="%s" % dt) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_no_default_hex_conversion(self): + """ + Ensure that fromhex is only used for values with the correct prefix and + is not called by default. Regression test related to gh-19598. + """ + c = TextIO("a b c") + with pytest.raises(ValueError, + match=".*convert string 'a' to float64 at row 0, column 1"): + np.loadtxt(c) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_exception(self): + """ + Ensure that the exception message raised during failed floating point + conversion is correct. Regression test related to gh-19598. 
+ """ + c = TextIO("qrs tuv") # Invalid values for default float converter + with pytest.raises(ValueError, + match="could not convert string 'qrs' to float64"): + np.loadtxt(c) + + def test_from_complex(self): + tgt = (complex(1, 1), complex(1, -1)) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, tgt) + + def test_complex_misformatted(self): + # test for backward compatibility + # some complex formats used to generate x+-yj + a = np.zeros((2, 2), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.16e') + c.seek(0) + txt = c.read() + c.seek(0) + # misformat the sign on the imaginary part, gh 7895 + txt_bad = txt.replace(b'e+00-', b'e00+-') + assert_(txt_bad != txt) + c.write(txt_bad) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, a) + + def test_universal_newline(self): + with temppath() as name: + with open(name, 'w') as f: + f.write('1 21\r3 42\r') + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + + def test_empty_field_after_tab(self): + c = TextIO() + c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') + c.seek(0) + dt = {'names': ('x', 'y', 'z', 'comment'), + 'formats': (' num rows + c = TextIO() + c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1, max_rows=6) + a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) + assert_array_equal(x, a) + + @pytest.mark.parametrize(["skip", "data"], [ + (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), + # "Bad" lines that do not end in newlines: + (1, ["ignored", "1,2", "", "3,4"]), + (1, StringIO("ignored\n1,2\n\n3,4")), + # Same as above, but do not skip any lines: + (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), + (0, ["-1,0", "1,2", "", "3,4"]), + (0, StringIO("-1,0\n1,2\n\n3,4"))]) + def test_max_rows_empty_lines(self, skip, data): + with pytest.warns(UserWarning, + match=f"Input line 3.*max_rows={3-skip}"): + res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3-skip) + assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) + + if isinstance(data, StringIO): + data.seek(0) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + with pytest.raises(UserWarning): + np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3-skip) + +class Testfromregex: + def test_record(self): + c = TextIO() + c.write('1.312 foo\n1.534 bar\n4.444 qux') + c.seek(0) + + dt = [('num', np.float64), ('val', 'S3')] + x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) + a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_2(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.int32), ('val', 'S3')] + x = np.fromregex(c, r"(\d+)\s+(...)", dt) + a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_3(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.float64)] + x = np.fromregex(c, r"(\d+)\s+...", dt) + a = np.array([(1312,), (1534,), (4444,)], dtype=dt) + assert_array_equal(x, a) + + @pytest.mark.parametrize("path_type", [str, Path]) + def test_record_unicode(self, path_type): + utf8 = b'\xcf\x96' + with temppath() as str_path: + path = path_type(str_path) + with open(path, 'wb') as f: + f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') + + dt = 
[('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + + def test_bad_dtype_not_structured(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + with pytest.raises(TypeError, match='structured datatype'): + np.fromregex(c, regexp, dtype=np.float64) + + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.genfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.genfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = dict(dtype=int, delimiter=',') + # + data = TextIO('comment\n1,2,3,5\n') + test = np.genfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = ["# %i" % i for i in range(1, 6)] + data.append("A, B, C") + data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) + data[-1] = "99,99" + kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with suppress_warnings() as sup: + 
sup.filter(ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, names=True, + encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test['f%i' % i], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.genfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.genfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_bad_fname(self): + with pytest.raises(TypeError, match='fname must be a string,'): + np.genfromtxt(123) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. 
+ data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_file_is_closed_on_error(self): + # gh-13200 + with tempdir() as tmpdir: + fpath = os.path.join(tmpdir, "test.csv") + with open(fpath, "wb") as f: + f.write('\N{GREEK PI SYMBOL}'.encode()) + + # ResourceWarnings are emitted from a destructor, so won't be + # detected by regular propagation to errors. + with assert_no_warnings(): + with pytest.raises(UnicodeDecodeError): + np.genfromtxt(fpath, encoding="ascii") + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None, encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, encoding="bytes", + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. 
+ converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. + converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.genfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.genfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + (b'r' not in x.lower() and x.strip() or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + (b'%' not in x.lower() and x.strip() or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = dict( + converters={2: strip_per, 3: strip_rand}, delimiter=",", + dtype=None, encoding="bytes") + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} + dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") + control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") + control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = 
{1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', [])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_dtype_with_object_no_converter(self): + # Object without a converter uses bytes: + parsed = np.genfromtxt(TextIO("1"), dtype=object) + assert parsed[()] == b"1" + parsed = np.genfromtxt(TextIO("string"), dtype=object) + assert parsed[()] == b"string" + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.genfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. 
+ data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.genfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = dict(names="a, b, c") + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with suppress_warnings() as sup: + sup.filter(message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + # when skip_header > 0 + test = np.genfromtxt(data, skip_header=1) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.genfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.genfromtxt(data, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = dict(dtype=None, delimiter=",", names=True,) + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.genfromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = 
np.genfromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + usemask=True, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = dict(delimiter=",", + dtype=int, + names="a,b,c", + missing_values={0: "N/A", 'b': " ", 2: "???"}, + filling_values={0: 0, 'b': 0, 2: -999}) + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.genfromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True, usemask=True) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = dict(delimiter=",", dtype=None, names=True) + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.genfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = dict(delimiter=",", dtype=None, names=True, + invalid_raise=False) + 
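+ # (With invalid_raise=False, genfromtxt skips malformed rows and emits a + # ConversionWarning instead of raising a ValueError.)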
def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: "(%s)" % x.decode()} + kwargs = dict(delimiter=",", converters=converters, + dtype=[(_, int) for _ in 'abcde'], encoding="bytes") + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = dict(delimiter=",", dtype=None, encoding="bytes") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + ctrl_dtype = [("AA", int), ("B_B", 
int), ("CC", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = dict(delimiter=",", names=True) + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), **kwargs) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fix-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9." 
+ kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = dict(delimiter=5, names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = dict(delimiter=",", dtype=None, filling_values=-999) + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], "test1") + assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], "test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', b'norm3']]) + 
assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = '\u03d6' + latin1 = '\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with open(path, "wt") as f: + f.write("norm1,norm2,norm3\n") + f.write("norm1," + latin1 + ",norm3\n") + f.write("test1,testNonethe" + utf8 + ",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="bytes") + # Check for warning when encoding not specified. 
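+ # (encoding="bytes" requests the legacy byte-string behaviour, which is + # what triggers the VisibleDeprecationWarning checked here.)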
+ assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True, + encoding="bytes") + test = recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', float)] + test = recfromcsv(data, missing_values='N/A', dtype=dtype) + control = np.array([(0, 1), (2, 3)], + dtype=dtype) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + #gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + def test_max_rows(self): + # Test the `max_rows` keyword argument. + data = '1 2\n3 4\n5 6\n7 8\n9 10\n' + txt = TextIO(data) + a1 = np.genfromtxt(txt, max_rows=3) + a2 = np.genfromtxt(txt) + assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) + assert_equal(a2, [[7, 8], [9, 10]]) + + # max_rows must be at least 1. + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) + + # An input with several invalid rows. 
+ data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n' + + test = np.genfromtxt(TextIO(data), max_rows=2) + control = np.array([[1., 1.], [2., 2.]]) + assert_equal(test, control) + + # Test keywords conflict + assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1, + max_rows=4) + + # Test with invalid value + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) + + # Test with invalid not raise + with suppress_warnings() as sup: + sup.filter(ConversionWarning) + + test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + # Structured array with field names. + data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n' + + # Test with header, names and comments + txt = TextIO(data) + test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True) + control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)], + dtype=[('c', '<f8'), ('d', '<f8')]) + assert_equal(test, control) + + def test_auto_dtype_largeint(self): + # Regression test for numpy/numpy#5635 whereby large integers could + # cause OverflowErrors. + + # Test the automatic definition of the output dtype + # + # 2**66 = 73786976294838206464 => should convert to float + # 2**34 = 17179869184 => should convert to int64 + # 2**10 = 1024 => should convert to int (int32 on 32-bit systems, + # int64 on 64-bit systems) + + data = TextIO('73786976294838206464 17179869184 1024') + + test = np.genfromtxt(data, dtype=None) + + assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) + + assert_(test.dtype['f0'] == float) + assert_(test.dtype['f1'] == np.int64) + assert_(test.dtype['f2'] == np.int_) + + assert_allclose(test['f0'], 73786976294838206464.) + assert_equal(test['f1'], 17179869184) + assert_equal(test['f2'], 1024) + + def test_unpack_float_data(self): + txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0") + a, b, c = np.loadtxt(txt, delimiter=",", unpack=True) + assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0])) + assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0])) + assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0])) + + def test_unpack_structured(self): + # Regression test for gh-4341 + # Unpacking should work on structured arrays + txt = TextIO("M 21 72\nF 35 58") + dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')} + a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_equal(a.dtype, np.dtype('S1')) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(c.dtype, np.dtype('f4')) + assert_array_equal(a, np.array([b'M', b'F'])) + assert_array_equal(b, np.array([21, 35])) + assert_array_equal(c, np.array([72., 58.])) + + def test_unpack_auto_dtype(self): + # Regression test for gh-4341 + # Unpacking should work when dtype=None + txt = TextIO("M 21 72.\nF 35 58.") + expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.])) + test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8") + for arr, result in zip(expected, test): + assert_array_equal(arr, result) + assert_equal(arr.dtype, result.dtype) + + def test_unpack_single_name(self): + # Regression test for gh-4341 + # Unpacking should work when structured dtype has only one field + txt = TextIO("21\n35") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array([21, 35], dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal(expected.dtype, test.dtype) + + def test_squeeze_scalar(self): + # Regression test for gh-4341 + # Unpacking a scalar should give zero-dim output, + # even if dtype is structured + txt = TextIO("1") + dt = {'names': ('a',), 'formats': ('i4',)} + 
expected = np.array((1,), dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal((), test.shape) + assert_equal(expected.dtype, test.dtype) + + @pytest.mark.parametrize("ndim", [0, 1, 2]) + def test_ndmin_keyword(self, ndim: int): + # lets have the same behaviour of ndmin as loadtxt + # as they should be the same for non-missing values + txt = "42" + + a = np.loadtxt(StringIO(txt), ndmin=ndim) + b = np.genfromtxt(StringIO(txt), ndmin=ndim) + + assert_array_equal(a, b) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + if IS_PYPY: + break_cycles() + break_cycles() + + @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_save_load_memmap_readwrite(self, filename_type): + with temppath(suffix='.npy') as path: + path = filename_type(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + if IS_PYPY: + break_cycles() + break_cycles() + data = np.load(path) + assert_array_equal(data, a) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_compressed_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_genfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self, filename_type): 
+ with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = dict( + missing_values="N/A", names=True, case_sensitive=True + ) + test = recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +# These next two classes encode the minimal API needed to save()/load() arrays. +# The `test_ducktyping` ensures they work correctly +class JustWriter: + def __init__(self, base): + self.base = base + + def write(self, s): + return self.base.write(s) + + def flush(self): + return self.base.flush() + +class JustReader: + def __init__(self, base): + self.base = base + + def read(self, n): + return self.base.read(n) + + def seek(self, off, whence=0): + return self.base.seek(off, whence) + + +def test_ducktyping(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = JustWriter(s) + + np.save(f, a) + f.flush() + s.seek(0) + + f = JustReader(s) + assert_array_equal(np.load(f), a) + + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + for a in z.values(): + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. 
+ + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + +def test_load_multiple_arrays_until_eof(): + f = BytesIO() + np.save(f, 1) + np.save(f, 2) + f.seek(0) + assert np.load(f) == 1 + assert np.load(f) == 2 + with pytest.raises(EOFError): + np.load(f) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_loadtxt.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_loadtxt.py new file mode 100644 index 0000000000000000000000000000000000000000..6ce953701ad512a2d215a309988661ce594f02da --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_loadtxt.py @@ -0,0 +1,1075 @@ +""" +Tests specific to `np.loadtxt` added during the move of loadtxt to be backed +by C code. +These tests complement those found in `test_io.py`. +""" + +import sys +import os +import pytest +from tempfile import NamedTemporaryFile, mkstemp +from io import StringIO + +import numpy as np +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_array_equal, HAS_REFCOUNT, IS_PYPY + + +def test_scientific_notation(): + """Test that both 'e' and 'E' are parsed correctly.""" + data = StringIO( + ( + "1.0e-1,2.0E1,3.0\n" + "4.0e-2,5.0E-1,6.0\n" + "7.0e-3,8.0E1,9.0\n" + "0.0e-4,1.0E-1,2.0" + ) + ) + expected = np.array( + [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] + ) + assert_array_equal(np.loadtxt(data, delimiter=","), expected) + + +@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"]) +def test_comment_multiple_chars(comment): + content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n" + txt = StringIO(content.replace("#", comment)) + a = np.loadtxt(txt, delimiter=",", comments=comment) + assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) + + +@pytest.fixture +def mixed_types_structured(): + """ + Fixture providing heterogeneous input data with a structured dtype, along + with the associated structured array.
+ """ + data = StringIO( + ( + "1000;2.4;alpha;-34\n" + "2000;3.1;beta;29\n" + "3500;9.9;gamma;120\n" + "4090;8.1;delta;0\n" + "5001;4.4;epsilon;-99\n" + "6543;7.8;omega;-1\n" + ) + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + return data, dtype, expected + + +@pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) +def test_structured_dtype_and_skiprows_no_empty_lines( + skiprows, mixed_types_structured): + data, dtype, expected = mixed_types_structured + a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) + assert_array_equal(a, expected[skiprows:]) + + +def test_unpack_structured(mixed_types_structured): + data, dtype, expected = mixed_types_structured + + a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) + assert_array_equal(a, expected["f0"]) + assert_array_equal(b, expected["f1"]) + assert_array_equal(c, expected["f2"]) + assert_array_equal(d, expected["f3"]) + + +def test_structured_dtype_with_shape(): + dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)]) + data = StringIO("0,1,2,3\n6,7,8,9\n") + expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected) + + +def test_structured_dtype_with_multi_shape(): + dtype = np.dtype([("a", "u1", (2, 2))]) + data = StringIO("0 1 2 3\n") + expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype) + assert_array_equal(np.loadtxt(data, dtype=dtype), expected) + + +def test_nested_structured_subarray(): + # Test from gh-16678 + point = np.dtype([('x', float), ('y', float)]) + dt = np.dtype([('code', int), ('points', point, (2,))]) + data = StringIO("100,1,2,3,4\n200,5,6,7,8\n") + expected = np.array( + [ + (100, [(1., 2.), (3., 4.)]), + (200, [(5., 6.), (7., 8.)]), + ], + dtype=dt + ) + assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected) + + +def test_structured_dtype_offsets(): + # An aligned structured dtype will have additional padding + dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True) + data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n") + expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_negative_row_limits(param): + """skiprows and max_rows should raise for negative parameters.""" + with pytest.raises(ValueError, match="argument must be nonnegative"): + np.loadtxt("foo.bar", **{param: -3}) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_noninteger_row_limits(param): + with pytest.raises(TypeError, match="argument must be an integer"): + np.loadtxt("foo.bar", **{param: 1.0}) + + +@pytest.mark.parametrize( + "data, shape", + [ + ("1 2 3 4 5\n", (1, 5)), # Single row + ("1\n2\n3\n4\n5\n", (5, 1)), # Single column + ] +) +def test_ndmin_single_row_or_col(data, shape): + arr = np.array([1, 2, 3, 4, 5]) + arr2d = arr.reshape(shape) + + assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d) + + 
+@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"]) +def test_bad_ndmin(badval): + with pytest.raises(ValueError, match="Illegal value of ndmin keyword"): + np.loadtxt("foo.bar", ndmin=badval) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_blank_lines_spaces_delimit(ws): + txt = StringIO( + f"1 2{ws}30\n\n{ws}\n" + f"4 5 60{ws}\n {ws} \n" + f"7 8 {ws} 90\n # comment\n" + f"3 2 1" + ) + # NOTE: It is unclear that the ` # comment` should succeed. Except + # for delimiter=None, which should use any whitespace (and maybe + # should just be implemented closer to Python + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected + ) + + +def test_blank_lines_normal_delimiter(): + txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1') + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected + ) + + +@pytest.mark.parametrize("dtype", (float, object)) +def test_maxrows_no_blank_lines(dtype): + txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0") + res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2) + assert_equal(res.dtype, dtype) + assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) +def test_exception_message_bad_values(dtype): + txt = StringIO("1,2\n3,XXX\n5,6") + msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2" + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + +def test_converters_negative_indices(): + txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]]) + res = np.loadtxt(txt, dtype=np.float64, delimiter=",", converters=conv) + assert_equal(res, expected) + + +def test_converters_negative_indices_with_usecols(): + txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]]) + res = np.loadtxt( + txt, + dtype=np.float64, + delimiter=",", + converters=conv, + usecols=[0, -1], + ) + assert_equal(res, expected) + + # Second test with variable number of rows: + res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",", + usecols=[0, -1], converters={-1: (lambda x: -1)}) + assert_array_equal(res, [[0, -1], [0, -1]]) + + +def test_ragged_error(): + rows = ["1,2,3", "1,2,3", "4,3,2,1"] + with pytest.raises(ValueError, + match="the number of columns changed from 3 to 4 at row 3"): + np.loadtxt(rows, delimiter=",") + + +def test_ragged_usecols(): + # usecols, and negative ones, work even with varying number of columns. 
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + expected = np.array([[0, 0], [0, 0], [0, 0]]) + res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + assert_equal(res, expected) + + txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n") + with pytest.raises(ValueError, + match="invalid column index -2 at row 2 with 1 columns"): + # There is no -2 column in the second row: + np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + + +def test_empty_usecols(): + txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[]) + assert res.shape == (3,) + assert res.dtype == np.dtype([]) + + +@pytest.mark.parametrize("c1", ["a", "の", "🫕"]) +@pytest.mark.parametrize("c2", ["a", "の", "🫕"]) +def test_large_unicode_characters(c1, c2): + # c1 and c2 span ascii, 16bit and 32bit range. + txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g") + res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",") + expected = np.array( + [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")], + dtype=np.dtype('U12') + ) + assert_equal(res, expected) + + +def test_unicode_with_converter(): + txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n") + conv = {0: lambda s: s.upper()} + res = np.loadtxt( + txt, + dtype=np.dtype("U12"), + converters=conv, + delimiter=",", + encoding=None + ) + expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']]) + assert_equal(res, expected) + + +def test_converter_with_structured_dtype(): + txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n') + dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')]) + conv = {0: lambda s: int(10*float(s)), -1: lambda s: s.upper()} + res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv) + expected = np.array( + [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt + ) + assert_equal(res, expected) + + +def test_converter_with_unicode_dtype(): + """ + With the 'bytes' encoding, tokens are encoded prior to being + passed to the converter. This means that the output of the converter may + be bytes instead of unicode as expected by `read_rows`. + + This test checks that outputs from the above scenario are properly decoded + prior to parsing by `read_rows`. + """ + txt = StringIO('abc,def\nrst,xyz') + conv = bytes.upper + res = np.loadtxt( + txt, dtype=np.dtype("U3"), converters=conv, delimiter=",", + encoding="bytes") + expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) + assert_equal(res, expected) + + +def test_read_huge_row(): + row = "1.5, 2.5," * 50000 + row = row[:-1] + "\n" + txt = StringIO(row * 2) + res = np.loadtxt(txt, delimiter=",", dtype=float) + assert_equal(res, np.tile([1.5, 2.5], (2, 50000))) + + +@pytest.mark.parametrize("dtype", "edfgFDG") +def test_huge_float(dtype): + # Covers a non-optimized path that is rarely taken: + field = "0" * 1000 + ".123456789" + dtype = np.dtype(dtype) + value = np.loadtxt([field], dtype=dtype)[()] + assert value == dtype.type("0.123456789") + + +@pytest.mark.parametrize( + ("given_dtype", "expected_dtype"), + [ + ("S", np.dtype("S5")), + ("U", np.dtype("U5")), + ], +) +def test_string_no_length_given(given_dtype, expected_dtype): + """ + The given dtype is just 'S' or 'U' with no length. In these cases, the + length of the resulting dtype is determined by the longest string found + in the file. 
+ """ + txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n") + res = np.loadtxt(txt, dtype=given_dtype, delimiter=",") + expected = np.array( + [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype + ) + assert_equal(res, expected) + assert_equal(res.dtype, expected_dtype) + + +def test_float_conversion(): + """ + Some tests that the conversion to float64 works as accurately as the + Python built-in `float` function. In a naive version of the float parser, + these strings resulted in values that were off by an ULP or two. + """ + strings = [ + '0.9999999999999999', + '9876543210.123456', + '5.43215432154321e+300', + '0.901', + '0.333', + ] + txt = StringIO('\n'.join(strings)) + res = np.loadtxt(txt) + expected = np.array([float(s) for s in strings]) + assert_equal(res, expected) + + +def test_bool(): + # Simple test for bool via integer + txt = StringIO("1, 0\n10, -1") + res = np.loadtxt(txt, dtype=bool, delimiter=",") + assert res.dtype == bool + assert_array_equal(res, [[True, False], [True, True]]) + # Make sure we use only 1 and 0 on the byte level: + assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_integer_signs(dtype): + dtype = np.dtype(dtype) + assert np.loadtxt(["+2"], dtype=dtype) == 2 + if dtype.kind == "u": + with pytest.raises(ValueError): + np.loadtxt(["-1\n"], dtype=dtype) + else: + assert np.loadtxt(["-2\n"], dtype=dtype) == -2 + + for sign in ["++", "+-", "--", "-+"]: + with pytest.raises(ValueError): + np.loadtxt([f"{sign}2\n"], dtype=dtype) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_implicit_cast_float_to_int_fails(dtype): + txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6") + with pytest.raises(ValueError): + np.loadtxt(txt, dtype=dtype, delimiter=",") + +@pytest.mark.parametrize("dtype", (np.complex64, np.complex128)) +@pytest.mark.parametrize("with_parens", (False, True)) +def test_complex_parsing(dtype, with_parens): + s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)" + if not with_parens: + s = s.replace("(", "").replace(")", "") + + res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",") + expected = np.array( + [[1.0-2.5j, 3.75, 7-5j], [4.0, -1900j, 0]], dtype=dtype + ) + assert_equal(res, expected) + + +def test_read_from_generator(): + def gen(): + for i in range(4): + yield f"{i},{2*i},{i**2}" + + res = np.loadtxt(gen(), dtype=int, delimiter=",") + expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]]) + assert_equal(res, expected) + + +def test_read_from_generator_multitype(): + def gen(): + for i in range(3): + yield f"{i} {i / 4}" + + res = np.loadtxt(gen(), dtype="i, d", delimiter=" ") + expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d") + assert_equal(res, expected) + + +def test_read_from_bad_generator(): + def gen(): + yield from ["1,2", b"3, 5", 12738] + + with pytest.raises( + TypeError, match=r"non-string returned while reading data"): + np.loadtxt(gen(), dtype="i, i", delimiter=",") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_object_cleanup_on_read_error(): + sentinel = object() 
already_read = 0 + + def conv(x): + nonlocal already_read + if already_read > 4999: + raise ValueError("failed half-way through!") + already_read += 1 + return sentinel + + txt = StringIO("x\n" * 10000) + + with pytest.raises(ValueError, match="at row 5000, column 1"): + np.loadtxt(txt, dtype=object, converters={0: conv}) + + assert sys.getrefcount(sentinel) == 2 + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_character_not_bytes_compatible(): + """Test exception when a character cannot be encoded as 'S'.""" + data = StringIO("–") # == \u2013 + with pytest.raises(ValueError): + np.loadtxt(data, dtype="S5") + + +@pytest.mark.parametrize("conv", (0, [float], "")) +def test_invalid_converter(conv): + msg = ( + "converters must be a dictionary mapping columns to converter " + "functions or a single callable." + ) + with pytest.raises(TypeError, match=msg): + np.loadtxt(StringIO("1 2\n3 4"), converters=conv) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_converters_dict_raises_non_integer_key(): + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0) + + +@pytest.mark.parametrize("bad_col_ind", (3, -3)) +def test_converters_dict_raises_non_col_key(bad_col_ind): + data = StringIO("1 2\n3 4") + with pytest.raises(ValueError, match="converter specified for column"): + np.loadtxt(data, converters={bad_col_ind: int}) + + +def test_converters_dict_raises_val_not_callable(): + with pytest.raises(TypeError, + match="values of the converters dictionary must be callable"): + np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1}) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field(q): + txt = StringIO( + f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q) + assert_array_equal(res, expected) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field_with_whitespace_delimiter(q): + txt = StringIO( + f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q) + assert_array_equal(res, expected) + + +def test_quote_support_default(): + """Support for quoted fields is disabled by default.""" + txt = StringIO('"lat,long", 45, 30\n') + dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)]) + + with pytest.raises(ValueError, + match="the dtype passed requires 3 columns but 4 were"): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + # Enable quoting support with non-None value for quotechar param + txt.seek(0) + expected = np.array([("lat,long", 45., 30.)], dtype=dtype) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_quotechar_multichar_error(): + txt
= StringIO("1,2\n3,4") + msg = r".*must be a single unicode character or None" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=",", quotechar="''") + + +def test_comment_multichar_error_with_quote(): + txt = StringIO("1,2\n3,4") + msg = ( + "when multiple comments or a multi-character comment is given, " + "quotes are not supported." + ) + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments="123", quotechar='"') + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"') + + # A single character string in a tuple is unpacked though: + res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'") + assert_equal(res, [[1, 2], [3, 4]]) + + +def test_structured_dtype_with_quotes(): + data = StringIO( + ( + "1000;2.4;'alpha';-34\n" + "2000;3.1;'beta';29\n" + "3500;9.9;'gamma';120\n" + "4090;8.1;'delta';0\n" + "5001;4.4;'epsilon';-99\n" + "6543;7.8;'omega';-1\n" + ) + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'") + assert_array_equal(res, expected) + + +def test_quoted_field_is_not_empty(): + txt = StringIO('1\n\n"4"\n""') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_quoted_field_is_not_empty_nonstrict(): + # Same as test_quoted_field_is_not_empty but check that we are not strict + # about missing closing quote (this is the `csv.reader` default also) + txt = StringIO('1\n\n"4"\n"') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_consecutive_quotechar_escaped(): + txt = StringIO('"Hello, my name is ""Monty""!"') + expected = np.array('Hello, my name is "Monty"!', dtype="U40") + res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"') + assert_equal(res, expected) + + +@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n")) +@pytest.mark.parametrize("ndmin", (0, 1, 2)) +@pytest.mark.parametrize("usecols", [None, (1, 2, 3)]) +def test_warn_on_no_data(data, ndmin, usecols): + """Check that a UserWarning is emitted when no data is read from input.""" + if usecols is not None: + expected_shape = (0, 3) + elif ndmin == 2: + expected_shape = (0, 1) # guess a single column?! 
+ else: + expected_shape = (0,) + + txt = StringIO(data) + with pytest.warns(UserWarning, match="input contained no data"): + res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols) + assert res.shape == expected_shape + + with NamedTemporaryFile(mode="w") as fh: + fh.write(data) + fh.seek(0) + with pytest.warns(UserWarning, match="input contained no data"): + res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols) + assert res.shape == expected_shape + +@pytest.mark.parametrize("skiprows", (2, 3)) +def test_warn_on_skipped_data(skiprows): + data = "1 2 3\n4 5 6" + txt = StringIO(data) + with pytest.warns(UserWarning, match="input contained no data"): + np.loadtxt(txt, skiprows=skiprows) + + +@pytest.mark.parametrize(["dtype", "value"], [ + ("i2", 0x0001), ("u2", 0x0001), + ("i4", 0x00010203), ("u4", 0x00010203), + ("i8", 0x0001020304050607), ("u8", 0x0001020304050607), + # The following values are constructed to lead to unique bytes: + ("float16", 3.07e-05), + ("float32", 9.2557e-41), ("complex64", 9.2557e-41+2.8622554e-29j), + ("float64", -1.758571353180402e-24), + # Here and below, the repr side-steps a small loss of precision in + # complex `str` in PyPy (which is probably fine, as repr works): + ("complex128", repr(5.406409232372729e-29-1.758571353180402e-24j)), + # Use integer values that fit into double. Everything else leads to + # problems due to longdoubles going via double and decimal strings + # causing rounding errors. + ("longdouble", 0x01020304050607), + ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))), + ("U2", "\U00010203\U000a0b0c")]) +@pytest.mark.parametrize("swap", [True, False]) +def test_byteswapping_and_unaligned(dtype, value, swap): + # Try to create "interesting" values within the valid unicode range: + dtype = np.dtype(dtype) + data = [f"x,{value}\n"] # repr as PyPy `str` truncates some + if swap: + dtype = dtype.newbyteorder() + full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False) + # The above ensures that the interesting "b" field is unaligned: + assert full_dt.fields["b"][1] == 1 + res = np.loadtxt(data, dtype=full_dt, delimiter=",", + max_rows=1) # max-rows prevents over-allocation + assert res["b"] == dtype.type(value) + + +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efdFD" + "?") +def test_unicode_whitespace_stripping(dtype): + # Test that all numeric types (and bool) strip whitespace correctly + # \u202F is a narrow no-break space, `\n` is just a whitespace if quoted. 
+ # Currently, skip float128 as it did not always support this and has no + # "custom" parsing: + txt = StringIO(' 3 ,"\u202F2\n"') + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, np.array([3, 2]).astype(dtype)) + + +@pytest.mark.parametrize("dtype", "FD") +def test_unicode_whitespace_stripping_complex(dtype): + # Complex has a few extra cases since it has two components and + # parentheses + line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n" + data = [line, line.replace(" ", "\u202F")] + res = np.loadtxt(data, dtype=dtype, delimiter=',') + assert_array_equal(res, np.array([[1, 2+3j, 4+5j, 6-7j, 8j, 9j]] * 2)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", "FD") +@pytest.mark.parametrize("field", + ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) +def test_bad_complex(dtype, field): + with pytest.raises(ValueError): + np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_nul_character_error(dtype): + # Test that a \0 character is correctly recognized as an error even if + # what comes before is valid (not everything gets parsed internally). + if dtype.lower() == "g": + pytest.xfail("longdouble/clongdouble assignment may misbehave.") + with pytest.raises(ValueError): + np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_no_thousands_support(dtype): + # Mainly to document behaviour, Python supports thousands like 1_1. + # (e and G may end up using different conversion and support it, this is + # a bug but happens...) + if dtype == "e": + pytest.skip("half assignment currently uses Python float converter") + if dtype in "eG": + pytest.xfail("clongdouble assignment is buggy (uses `complex`?).") + + assert int("1_1") == float("1_1") == complex("1_1") == 11 + with pytest.raises(ValueError): + np.loadtxt(["1_1\n"], dtype=dtype) + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2\n,3\n"], + ["1,2\n", "2\r,3\n"]]) +def test_bad_newline_in_iterator(data): + # In NumPy <=1.22 this was accepted, because newlines were completely + # ignored when the input was an iterable. This could be changed, but right + # now, we raise an error. + msg = "Found an unquoted embedded newline within a single line" + with pytest.raises(ValueError, match=msg): + np.loadtxt(data, delimiter=",") + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2,3\r\n"], # a universal newline + ["1,2\n", "'2\n',3\n"], # a quoted newline + ["1,2\n", "'2\r',3\n"], + ["1,2\n", "'2\r\n',3\n"], +]) +def test_good_newline_in_iterator(data): + # The quoted newlines will be untransformed here, but are just whitespace. + res = np.loadtxt(data, delimiter=",", quotechar="'") + assert_array_equal(res, [[1., 2.], [2., 3.]]) + + +@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"]) +def test_universal_newlines_quoted(newline): + # Check that universal newline support within the tokenizer is not applied + # to quoted fields. 
(note that lines must end in newline or quoted + # fields will not include a newline at all) + data = ['1,"2\n"\n', '3,"4\n', '1"\n'] + data = [row.replace("\n", newline) for row in data] + res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"') + assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']]) + + +def test_null_character(): + # Basic tests to check that the NUL character is not special: + res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000") + assert_array_equal(res, [[1, 2, 3], [4, 5, 6]]) + + # Also not as part of a field (avoid unicode/arrays as unicode strips \0) + res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"], + delimiter=",", dtype=object) + assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]] + + +def test_iterator_fails_getting_next_line(): + class BadSequence: + def __len__(self): + return 100 + + def __getitem__(self, item): + if item == 50: + raise RuntimeError("Bad things happened!") + return f"{item}, {item+1}" + + with pytest.raises(RuntimeError, match="Bad things happened!"): + np.loadtxt(BadSequence(), dtype=int, delimiter=",") + + +class TestCReaderUnitTests: + # These are internal tests for path that should not be possible to hit + # unless things go very very wrong somewhere. + def test_not_an_filelike(self): + with pytest.raises(AttributeError, match=".*read"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_read_fails(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + class BadFileLike: + counter = 0 + + def read(self, size): + self.counter += 1 + if self.counter > 20: + raise RuntimeError("Bad bad bad!") + return "1,2,3\n" + + with pytest.raises(RuntimeError, match="Bad bad bad!"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_bad_read(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + + class BadFileLike: + counter = 0 + + def read(self, size): + return 1234 # not a string! + + with pytest.raises(TypeError, + match="non-string returned while reading data"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_not_an_iter(self): + with pytest.raises(TypeError, + match="error reading from object, expected an iterable"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False) + + def test_bad_type(self): + with pytest.raises(TypeError, match="internal error: dtype must"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype="i", filelike=False) + + def test_bad_encoding(self): + with pytest.raises(TypeError, match="encoding must be a unicode"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False, encoding=123) + + @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"]) + def test_manual_universal_newlines(self, newline): + # This is currently not available to users, because we should always + # open files with universal newlines enabled `newlines=None`. + # (And reading from an iterator uses slightly different code paths.) 
+ # We have no real support for `newline="\r"` or `newline="\n" as the + # user cannot specify those options. + data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline), + newline="") + + res = np._core._multiarray_umath._load_from_filelike( + data, dtype=np.dtype("U10"), filelike=True, + quote='"', comment="#", skiplines=1) + assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "]) + + +def test_delimiter_comment_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",") + + +def test_delimiter_quotechar_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",") + + +def test_comment_quotechar_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#") + + +def test_delimiter_and_multiple_comments_collision_raises(): + with pytest.raises( + TypeError, match="Comment characters.*cannot include the delimiter" + ): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","]) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_collision_with_default_delimiter_raises(ws): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws) + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws) + + +@pytest.mark.parametrize("nl", ("\n", "\r")) +def test_control_character_newline_raises(nl): + txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}") + msg = "control character.*cannot be a newline" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=nl) + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, comments=nl) + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, quotechar=nl) + + +@pytest.mark.parametrize( + ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"), + [ + ("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes + ("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str + ], +) +@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize +def test_parametric_unit_discovery( + generic_data, long_datum, unitless_dtype, expected_dtype, nrows +): + """Check that the correct unit (e.g. month, day, second) is discovered from + the data when a user specifies a unitless datetime.""" + # Unit should be "D" (days) due to last entry + data = [generic_data] * nrows + [long_datum] + expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows+1 + assert len(data) == len(expected) + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)+"\n") + # loading the full file... + a = np.loadtxt(fname, dtype=unitless_dtype) + assert len(a) == len(expected) + assert a.dtype == expected.dtype + assert_equal(a, expected) + # loading half of the file... 
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows/2)) + os.remove(fname) + assert len(a) == int(nrows/2) + assert_equal(a, expected[:int(nrows/2)]) + + +def test_str_dtype_unit_discovery_with_converter(): + data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"] + expected = np.array( + ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" + ) + conv = lambda s: s.strip("XXX") + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype="U", converters=conv) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + a = np.loadtxt(fname, dtype="U", converters=conv) + os.remove(fname) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_control_character_empty(): + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), delimiter="") + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), quotechar="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments=["#", ""]) + + +def test_control_characters_as_bytes(): + """Byte control characters (comments, delimiter) are supported.""" + a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",") + assert_equal(a, [1, 2, 3]) + + +@pytest.mark.filterwarnings('ignore::UserWarning') +def test_field_growing_cases(): + # Test empty field appending/growing (each field still takes 1 character) + # to see if the final field appending does not create issues. + res = np.loadtxt([""], delimiter=",", dtype=bytes) + assert len(res) == 0 + + for i in range(1, 1024): + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) + assert len(res) == i+1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"]*file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_mixins.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..3a7f5b86f4bdd5e2cb8ba3316761195dec8103df --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_mixins.py @@ -0,0 +1,216 @@ +import numbers +import operator + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + + +# NOTE: This class should be kept as an exact copy of the example from the +# docstring for NDArrayOperatorsMixin. 
+ +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. + inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.value) + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin: + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + + def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut: + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with 
assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..4c5ca7e5929061e41d87a7fc5d601345b6931fff --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,1418 @@ +import warnings +import pytest +import inspect +from functools import partial + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_raises, + assert_raises_regex, assert_array_equal, suppress_warnings + ) + + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = 
np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestSignatureMatch: + NANFUNCS = { + np.nanmin: np.amin, + np.nanmax: np.amax, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanpercentile: np.percentile, + np.nanquantile: np.quantile, + np.nanvar: np.var, + np.nanstd: np.std, + } + IDS = [k.__name__ for k in NANFUNCS] + + @staticmethod + def get_signature(func, default="..."): + """Construct a signature and replace all default parameter-values.""" + prm_list = [] + signature = inspect.signature(func) + for prm in signature.parameters.values(): + if prm.default is inspect.Parameter.empty: + prm_list.append(prm) + else: + prm_list.append(prm.replace(default=default)) + return inspect.Signature(prm_list) + + @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS) + def test_signature_match(self, nan_func, func): + # Ignore the default parameter-values as they can sometimes differ + # between the two functions (*e.g.* one has `False` while the other + # has `np._NoValue`) + signature = self.get_signature(func) + nan_signature = self.get_signature(nan_func) + np.testing.assert_equal(signature, nan_signature) + + def test_exhaustiveness(self): + """Validate that all nan functions are actually tested.""" + np.testing.assert_equal( + set(self.IDS), set(np.lib._nanfunctions_impl.__all__) + ) + + +class TestNanFunctions_MinMax: + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "All-NaN slice encountered" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
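Editor's note on the all-NaN cases covered above: a short sketch, not part of the patch and assuming only numpy and the standard library, of how `nanmin` behaves when a slice contains nothing but NaN (names `a`, `res`, `w` are illustrative).

import warnings
import numpy as np

a = np.array([[1.0, np.nan],
              [np.nan, np.nan]])
with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    res = np.nanmin(a, axis=1)
assert res[0] == 1.0          # NaNs are simply skipped in a mixed row
assert np.isnan(res[1])       # an all-NaN row yields nan...
assert any(issubclass(x.category, RuntimeWarning) for x in w)
# ...and emits the RuntimeWarning "All-NaN slice encountered" that
# test_allnans above matches against.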
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + # check that rows of nan are dealt with for subclasses (#4628) + mine[1] = np.nan + for f in self.nanfuncs: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine) + assert_(res.shape == ()) + assert_(res != np.nan) + assert_(len(w) == 0) + + def test_object_array(self): + arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) + assert_equal(np.nanmin(arr), 1.0) + assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # assert_equal does not work on object arrays of nan + assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + initial = 100 if f is np.nanmax else 0 + + ret1 = f(ar, initial=initial) + assert ret1.dtype == dtype + assert ret1 == initial + + ret2 = f(ar.view(MyNDArray), initial=initial) + assert ret2.dtype == dtype + assert ret2 == initial + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 4 if f is np.nanmin else 8 + + ret1 = f(ar, where=where, initial=5) + assert ret1.dtype == dtype + assert ret1 == reference + + ret2 = f(ar.view(MyNDArray), where=where, initial=5) + assert ret2.dtype == dtype + assert ret2 == reference + + +class TestNanFunctions_ArgminArgmax: + + nanfuncs = [np.nanargmin, np.nanargmax] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in") + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func in self.nanfuncs: + with pytest.raises(ValueError, match="All-NaN slice encountered"): + func(array, axis=axis) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises_regex( + ValueError, + "attempt to get argm.. of an empty sequence", + f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_keepdims(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, keepdims=True) + assert ret.ndim == ar.ndim + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_out(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + out = np.zeros((), dtype=np.intp) + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, out=out) + assert ret is out + assert ret == reference + + + +_TEST_ARRAYS = { + "0d": np.array(5), + "1d": np.array([127, 39, 93, 87, 46]) +} +for _v in _TEST_ARRAYS.values(): + _v.setflags(write=False) + + +@pytest.mark.parametrize( + "dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O", +) +@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys()) +class TestNanFunctions_NumberTypes: + nanfuncs = { + np.nanmin: np.min, + np.nanmax: np.max, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanvar: np.var, + np.nanstd: np.std, + } + nanfunc_ids = [i.__name__ for i in nanfuncs] + + @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids) + @np.errstate(over="ignore") + def test_nanfunc(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat) + out = nanfunc(mat) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + 
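Editor's note on the `nanfuncs` mapping exercised above: each `nan*` function is tested against its plain counterpart, and the difference reduces to NaNs being masked out rather than propagated. A minimal sketch, not part of the vendored file, assuming only numpy:

import numpy as np

x = np.array([1.0, np.nan, 3.0])
assert np.isnan(np.sum(x))      # plain reductions propagate NaN
assert np.nansum(x) == 4.0      # nan* reductions skip NaN entries
assert np.nanmean(x) == 2.0     # mean over the two non-NaN values only

On NaN-free input the two families agree exactly, which is why the parametrized `test_nanfunc` above can compare them on arbitrary integer, float, and object arrays.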
@pytest.mark.parametrize( + "nanfunc,func", + [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)], + ids=["nanquantile", "nanpercentile"], + ) + def test_nanfunc_q(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + if mat.dtype.kind == "c": + assert_raises(TypeError, func, mat, q=1) + assert_raises(TypeError, nanfunc, mat, q=1) + + else: + tgt = func(mat, q=1) + out = nanfunc(mat, q=1) + + assert_almost_equal(out, tgt) + + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanvar, np.var), (np.nanstd, np.std)], + ids=["nanvar", "nanstd"], + ) + def test_nanfunc_ddof(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, ddof=0.5) + out = nanfunc(mat, ddof=0.5) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc", [np.nanvar, np.nanstd] + ) + def test_nanfunc_correction(self, mat, dtype, nanfunc): + mat = mat.astype(dtype) + assert_almost_equal( + nanfunc(mat, correction=0.5), nanfunc(mat, ddof=0.5) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=0.5, correction=0.5) + + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=1, correction=0) + + +class SharedNanFunctionsTestsMixin: + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) + # scalar case + tgt = rf(mat, 
axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array, axis=axis) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value]*3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 28 if f is np.nansum else 3360 + ret = f(ar, initial=2) + assert ret.dtype == dtype + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 26 if f is np.nansum else 2240 + ret = f(ar, where=where, initial=2) + assert ret.dtype == dtype + assert ret == reference + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan) + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value*np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, 
axis=1) + assert_equal(res, tgt) + tgt = np.zeros((0)) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) + + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros,axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + sup.filter(ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + + # `nanvar` and `nanstd` convert complex inputs to their + # corresponding floating dtype + if func is np.nanmean: + assert out.dtype == array.dtype + else: + assert out.dtype == np.abs(array).dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + 
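# Note: reducing the (0, 3) array over axis 0 or None leaves no data in + # any slice, so the result is all-NaN and exactly one RuntimeWarning is + # expected (checked next); axis=1 below instead reduces zero rows of + # length 3, so the result is merely empty and nothing warns. +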
assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f, f_std in zip(self.nanfuncs, self.stdfuncs): + reference = f_std(ar[where][2:]) + dtype_reference = dtype if f is np.nanmean else ar.real.dtype + + ret = f(ar, where=where) + assert ret.dtype == dtype_reference + np.testing.assert_allclose(ret, reference) + + def test_nanstd_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + A[:, 5, :] = np.nan + + mean_out = np.zeros((10, 1, 5)) + std_out = np.zeros((10, 1, 5)) + + mean = np.nanmean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the one passed in via `out` + assert mean_out is mean + + std = np.nanstd(A, + out=std_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the one passed in via `out` + assert std_out is std + + # Shape of returned mean and std should be the same + assert std.shape == mean.shape + assert std.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + std_old = np.nanstd(A, axis=1, keepdims=True) + + assert std_old.shape == mean.shape + assert_almost_equal(std, std_old) + +_TIME_UNITS = ( + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" +) + +# All `inexact` + `timedelta64` type codes +_TYPE_CODES = list(np.typecodes["AllFloat"]) +_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] + + +class TestNanFunctions_Median: + + def test_mutation(self): + # Check that passed array is not modified. 
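+ # Note: np.nanmedian may sort/partition its working buffer; only with + # overwrite_input=True is it allowed to do that in place, so the default + # call here must operate on a copy and leave _ndat, NaNs included, intact.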
+ ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.nanmedian(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:,0] = 1. 
# ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanmedian, d, axis=-5) + assert_raises(AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(AxisError, np.nanmedian, d, axis=4) + assert_raises(AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class 
TestNanFunctions_Percentile: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("weighted", [False, True]) + def test_out(self, weighted): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + if weighted: + w_args = {"weights": np.ones_like(mat), "method": "inverted_cdf"} + nan_w_args = { + "weights": np.ones_like(nan_mat), "method": "inverted_cdf" + } + else: + w_args = dict() + nan_w_args = dict() + tgt = np.percentile(mat, 42, axis=1, **w_args) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None, **w_args) + res = np.nanpercentile( + nan_mat, 42, axis=None, out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile( + nan_mat, 42, axis=(0, 1), out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_complex(self): + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + + @pytest.mark.parametrize("weighted", [False, True]) + 
@pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): + if weighted: + percentile = partial(np.percentile, method="inverted_cdf") + nanpercentile = partial(np.nanpercentile, method="inverted_cdf") + + def gen_weights(d): + return np.ones_like(d) + + else: + percentile = np.percentile + nanpercentile = np.nanpercentile + + def gen_weights(d): + return None + + tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) + for d in _rdat]) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, (28, 98), axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanpercentile(array, 60, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) 
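+ # A 1-d input with a scalar q should likewise come back as a true scalar + # rather than a 0-d array, e.g. np.nanpercentile(np.arange(6), 50) -> 2.5: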
+ a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "All-NaN slice encountered") + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal( + np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) + ) + + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). 
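+ # (Assumption of this test rather than a documented guarantee: with + # method="inverted_cdf" a sample whose weight is exactly 0 can never be + # selected, so zero-weighting the NaN positions on the NaN-free data + # should match what nanpercentile computes after dropping the NaNs.)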
+ weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + + +class TestNanFunctions_Quantile: + # most of this is already tested by TestPercentile + + @pytest.mark.parametrize("weighted", [False, True]) + def test_regression(self, weighted): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + if weighted: + w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"} + else: + w_args = dict() + + assert_equal(np.nanquantile(ar, q=0.5, **w_args), + np.nanpercentile(ar, q=50, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0, **w_args), + np.nanpercentile(ar, q=50, axis=0, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1, **w_args), + np.nanpercentile(ar, q=50, axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1, **w_args), + np.nanpercentile(ar, q=[50], axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1, **w_args), + np.nanpercentile(ar, q=[25, 50, 75], axis=1, **w_args)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) + assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_complex(self): + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanquantile(array, 1, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + +@pytest.mark.parametrize("arr, expected", [ + # array of floats with some nans + (np.array([np.nan, 5.0, np.nan, np.inf]), + np.array([False, True, False, True])), + # int64 array that can't possibly have nans + (np.array([1, 5, 7, 9], dtype=np.int64), + True), + # bool array that can't possibly have nans + (np.array([False, True, False, True]), + True), + # 2-D complex array with nans + (np.array([[np.nan, 
5.0], + [np.nan, np.inf]], dtype=np.complex64), + np.array([[False, True], + [False, True]])), + ]) +def test__nan_mask(arr, expected): + for out in [None, np.empty(arr.shape, dtype=np.bool)]: + actual = _nan_mask(arr, out=out) + assert_equal(actual, expected) + # the above won't distinguish between True proper + # and an array of True values; we want True proper + # for types that can't possibly contain NaN + if type(expected) is not np.ndarray: + assert actual is True + + +def test__replace_nan(): + """ Test that _replace_nan returns the original array if there are no + NaNs, not a copy. + """ + for dtype in [np.bool, np.int32, np.int64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 0) + assert mask is None + # do not make a copy if there are no nans + assert result is arr + + for dtype in [np.float32, np.float64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 2) + assert (mask == False).all() + # mask is not None, so we make a copy + assert result is not arr + assert_equal(result, arr) + + arr_nan = np.array([0, 1, np.nan], dtype=dtype) + result_nan, mask_nan = _replace_nan(arr_nan, 2) + assert_equal(mask_nan, np.array([False, False, True])) + assert result_nan is not arr_nan + assert_equal(result_nan, np.array([0, 1, 2])) + assert np.isnan(arr_nan[-1]) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_packbits.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_packbits.py new file mode 100644 index 0000000000000000000000000000000000000000..c2f1d48d4b6a5a60d603198c253e8265f4ec6346 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_packbits.py @@ -0,0 +1,376 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises +import pytest +from itertools import chain + +def test_packbits(): + # Copied from the docstring. + a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. 
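+ # Packing an axis of length n yields ceil(n / 8) bytes along that axis + # (10 bits -> 2 bytes, 20 -> 3) while zero-length axes stay empty, which + # is what each packed shape below encodes.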
+ shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + +@pytest.mark.parametrize('bitorder', ('little', 'big')) +def test_packbits_large(bitorder): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None, bitorder=bitorder) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + if bitorder == 'big': + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 
128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. 
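+ # unpackbits expands each uint8 into 8 bits, most significant bit first + # by default, e.g. 23 == 0b00010111 -> [0, 0, 0, 1, 0, 1, 1, 1].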
+ a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + +def test_pack_unpack_order(): + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + b_little = np.unpackbits(a, axis=1, bitorder='little') + b_big = np.unpackbits(a, axis=1, bitorder='big') + assert_array_equal(b, b_big) + assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) + assert_array_equal(b[:,::-1], b_little) + assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) + assert_raises(ValueError, np.unpackbits, a, bitorder='r') + assert_raises(TypeError, np.unpackbits, a, bitorder=10) + + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) + + +class TestCount(): + x = np.array([ + [1, 0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 1], + [1, 1, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 0, 1, 0, 1, 0], + ], dtype=np.uint8) + padded1 = np.zeros(57, dtype=np.uint8) + padded1[:49] = x.ravel() + padded1b = np.zeros(57, dtype=np.uint8) + padded1b[:49] = x[::-1].copy().ravel() + padded2 = np.zeros((9, 9), dtype=np.uint8) + padded2[:7, :7] = x + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) + def test_roundtrip(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + # test complete invertibility of packbits and unpackbits with count + packed = np.packbits(self.x, bitorder=bitorder) + unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + ]) + def test_count(self, kwargs): + packed = np.packbits(self.x) + unpacked = np.unpackbits(packed, **kwargs) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:-1]) + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + # delta==-1 when count<0 because 
one extra zero of padding + @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) + def test_roundtrip_axis(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) + unpacked0 = np.unpackbits(packed0, axis=0, count=count, + bitorder=bitorder) + assert_equal(unpacked0.dtype, np.uint8) + assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) + unpacked1 = np.unpackbits(packed1, axis=1, count=count, + bitorder=bitorder) + assert_equal(unpacked1.dtype, np.uint8) + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + {'bitorder' : 'little'}, + {'bitorder': 'little', 'count': None}, + {'bitorder' : 'big'}, + {'bitorder': 'big', 'count': None}, + ]) + def test_axis_count(self, kwargs): + packed0 = np.packbits(self.x, axis=0) + unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) + assert_equal(unpacked0.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) + else: + assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1) + unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) + assert_equal(unpacked1.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1]) + else: + assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) + + def test_bad_count(self): + packed0 = np.packbits(self.x, axis=0) + assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) + packed1 = np.packbits(self.x, axis=1) + assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) + packed = np.packbits(self.x) + assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_polynomial.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..33109dae1c72affe454f3edcf5c0d7e97702b468 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,303 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose + ) + +import pytest + +# `poly1d` has some support for `np.bool` and `np.timedelta64`, +# but it is limited and they are therefore excluded here +TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O" + + +class TestPolynomial: + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple coeffs to 
make calculations easier + p = np.poly1d([1., 2, 4]) + q = np.poly1d([4., 2, 1]) + assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + @pytest.mark.parametrize("type_code", TYPE_CODES) + def test_poly1d_misc(self, type_code: str) -> None: + dtype = np.dtype(type_code) + ar = np.array([1, 2, 3], dtype=dtype) + p = np.poly1d(ar) + + # `__eq__` + assert_equal(np.asarray(p), ar) + assert_equal(np.asarray(p).dtype, dtype) + assert_equal(len(p), 2) + + # `__getitem__` + comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0} + for index, ref in comparison_dct.items(): + scalar = p[index] + assert_equal(scalar, ref) + if dtype == np.object_: + assert isinstance(scalar, int) + else: + assert_equal(scalar.dtype, dtype) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, + 1-2j, 1.+3.5j, 1-3.5j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) + assert_(np.isrealobj(np.poly([1j, -1j]))) + assert_(np.isrealobj(np.poly([1, -1]))) + + assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) + + np.random.seed(42) + a = np.random.randn(100) + 1j*np.random.randn(100) + assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) + + def test_roots(self): + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + + def test_str_leading_zeros(self): + p = np.poly1d([4, 3, 2, 1]) + p[3] = 0 + assert_equal(str(p), + " 2\n" + "3 x + 2 x + 1") + + p = np.poly1d([1, 2]) + p[0] = 0 + p[1] = 0 + assert_equal(str(p), " \n0") + + def test_polyfit(self): + c = np.array([3., 2., 1.]) + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2/7.0 + + # Check exception when too few points for variance estimate. 
Note that + # the estimate requires the number of data points to exceed + # degree + 1 + assert_raises(ValueError, np.polyfit, + [1], [1], deg=0, cov=True) + + # check 1D case + m, cov = np.polyfit(x, y+err, 2, cov=True) + est = [3.8571, 0.2857, 1.619] + assert_almost_equal(est, m, decimal=4) + val0 = [[ 1.4694, -2.9388, 0.8163], + [-2.9388, 6.3673, -2.1224], + [ 0.8163, -2.1224, 1.161 ]] + assert_almost_equal(val0, cov, decimal=4) + + m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) + assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) + val = [[ 4.3964, -5.0052, 0.4878], + [-5.0052, 6.8067, -0.9089], + [ 0.4878, -0.9089, 0.3337]] + assert_almost_equal(val, cov2, decimal=4) + + m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled") + assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) + val = [[ 0.1473, -0.1677, 0.0163], + [-0.1677, 0.228 , -0.0304], + [ 0.0163, -0.0304, 0.0112]] + assert_almost_equal(val, cov3, decimal=4) + + # check 2D (n,1) case + y = y[:, np.newaxis] + c = c[:, np.newaxis] + assert_almost_equal(c, np.polyfit(x, y, 2)) + # check 2D (n,2) case + yy = np.concatenate((y, y), axis=1) + cc = np.concatenate((c, c), axis=1) + assert_almost_equal(cc, np.polyfit(x, yy, 2)) + + m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) + assert_almost_equal(est, m[:, 0], decimal=4) + assert_almost_equal(est, m[:, 1], decimal=4) + assert_almost_equal(val0, cov[:, :, 0], decimal=4) + assert_almost_equal(val0, cov[:, :, 1], decimal=4) + + # check order 1 (deg=0) case, where the analytic results are simple + np.random.seed(123) + y = np.random.normal(size=(4, 10000)) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True) + # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5. + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # Without scaling, since reduced chi2 is 1, the result should be the same. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), + deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.5) + # If we estimate our errors wrong, no change with scaling: + w = np.full(y.shape[0], 1./0.5) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # But if we do not scale, our estimate for the error in the mean will + # differ. 
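+ # With w = 1/0.5 the claimed per-point sigma is 0.5 while the true sigma + # is 1, so the unscaled error estimate for the mean comes out as + # 0.5/sqrt(4) = 0.25 rather than the correct 0.5: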
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_zero_poly_dtype(self): + """ + Regression test for gh-16354. + """ + z = np.array([0, 0, 0]) + p = np.poly1d(z.astype(np.int64)) + assert_equal(p.coeffs.dtype, np.int64) + + p = np.poly1d(z.astype(np.float32)) + assert_equal(p.coeffs.dtype, np.float32) + + p = np.poly1d(z.astype(np.complex64)) + assert_equal(p.coeffs.dtype, np.complex64) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) + assert_equal(p != None, True) + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q*a + r, b) + + c = [1, 2, 3] + d = np.poly1d([1, 2, 3]) + s, t = np.polydiv(c, d) + assert isinstance(s, np.poly1d) + assert isinstance(t, np.poly1d) + u, v = np.polydiv(d, c) + assert isinstance(u, np.poly1d) + assert isinstance(v, np.poly1d) + + def test_poly_coeffs_mutable(self): + """ Coefficients should be modifiable """ + p = np.poly1d([1, 2, 3]) + + p.coeffs += 1 + assert_equal(p.coeffs, [2, 3, 4]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [2, 3, 14]) + + # this never used to be allowed - let's not add features to deprecated + # APIs + assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_recfunctions.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..c81b6a190a60270d99b2e41ac09fe11282a6b809 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,1043 @@ +import pytest + +import numpy as np +import numpy.ma as ma +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises +from numpy.lib.recfunctions import ( + drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, + find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, + repack_fields, unstructured_to_structured, 
structured_to_unstructured, + apply_along_fields, require_fields, assign_fields_by_name) +get_fieldspec = np.lib.recfunctions._get_fieldspec +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions._zip_descr +zip_dtype = np.lib.recfunctions._zip_dtype + + +class TestRecFunctions: + # Misc tests + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # dropping all fields results in an array with no fields + test = drop_fields(a, ['a', 'b']) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), 
('B', float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + def test_structured_to_unstructured(self, tmp_path): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4,5), dtype='f8')) + + b = np.array([(1, 2, 
5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ])) + out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) + assert_equal(out, np.array([ 1. , 4. , 7. , 10. ])) + + c = np.arange(20).reshape((4,5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3. , 5.5, 9. , 11. ])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing the order of attributes works + dd_attrib_rev = structured_to_unstructured(d[['z', 'x']]) + assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]]) + assert_(np.shares_memory(dd_attrib_rev, d)) + + # including uniform fields with subarrays unpacked + d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), + (8, [9, 10], [[11, 12], [13, 14]])], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing with sub-arrays works as expected + d_rev = d[::-1] + dd_rev = structured_to_unstructured(d_rev) + assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14], + [1, 2, 3, 4, 5, 6, 7]]) + + # check that sub-arrays keep the order of their values + d_attrib_rev = d[['x2', 'x1', 'x0']] + dd_attrib_rev = structured_to_unstructured(d_attrib_rev) + assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1], + [11, 12, 13, 14, 9, 10, 8]]) + + # with ignored field at the end + d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32), + (8, [9, 10], [[11, 12], [13, 14]], 64)], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2))), ('ignored', 'u1')]) + dd = structured_to_unstructured(d[['x0', 'x1', 'x2']]) + assert_(np.shares_memory(dd, d)) + assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7], + [8, 9, 10, 11, 12, 13, 14]]) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + 
assert_equal(inspect(dt), ((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((5,), np.int32, dt)) + + dt = structured() + assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) + + # these currently don't work, but we may make it work in the future + assert_raises(NotImplementedError, structured_to_unstructured, + np.zeros(3, dt), dtype=np.int32) + assert_raises(NotImplementedError, unstructured_to_structured, + np.zeros((3,0), dtype=np.int32)) + + # test supported ndarray subclasses + d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')]) + dd_expected = structured_to_unstructured(d_plain, copy=True) + + # recarray + d = d_plain.view(np.recarray) + + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.recarray) + assert_(type(ddd) is np.recarray) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + # memmap + d = np.memmap(tmp_path / 'memmap', + mode='w+', + dtype=d_plain.dtype, + shape=d_plain.shape) + d[:] = d_plain + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.memmap) + assert_(type(ddd) is np.memmap) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + def test_unstructured_to_structured(self): + # test if dtype is the args of np.dtype + a = np.zeros((20, 2)) + test_dtype_args = [('x', float), ('y', float)] + test_dtype = np.dtype(test_dtype_args) + field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now + field2 = unstructured_to_structured(a, dtype=test_dtype) # before + assert_equal(field1, field2) + + def test_field_assignment_by_name(self): + a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + newdt = [('b', 'f4'), ('c', 'u1')] + + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + + b = np.array([(1,2), (3,4)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype)) + + # test nested fields + a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) + newdt = [('a', [('c', 'u1')])] + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + b = np.array([((2,),), ((3,),)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype)) + + # test unstructured code path for 0d arrays + a, b = np.array(3), np.array(0) + assign_fields_by_name(b, a) + assert_equal(b[()], 3) + + +class TestRecursiveFillFields: + # Test recursive_fill_fields. 
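# A minimal sketch (not part of the committed file) of what
# recursive_fill_fields does, assuming the numpy.lib.recfunctions API these
# tests exercise: matching fields are copied from `a` into `b`, recursing
# into nested dtypes, and rows of `b` beyond len(a) keep their original values.
import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros(3, dtype=a.dtype)
filled = rfn.recursive_fill_fields(a, b)
# filled == [(1, 10.), (2, 20.), (0, 0.)] -- the trailing row stays zero.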
+ def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays: + # Test merge_arrays + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. + (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int), ('bc', [])])])] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = 
merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. + z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes warnings about unused variables + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields: + # Test append_fields + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays: + # Test stack_arrays + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def 
test_solo(self): + # Test stack_arrays on single arrays + (_, x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
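# A minimal sketch (not part of the committed file) of the behavior this test
# pins down, assuming numpy.lib.recfunctions.stack_arrays: fields missing from
# one input are filled from `defaults` (and remain masked in the result), and
# keys in `defaults` that name no field, like 'D' here, are silently ignored.
import numpy as np
from numpy.lib import recfunctions as rfn

z = np.array([(b'A', 1.)], dtype=[('A', 'S3'), ('B', float)])
zz = np.array([(b'a', 10., 100.)],
              dtype=[('A', 'S3'), ('B', float), ('C', float)])
stacked = rfn.stack_arrays((z, zz), defaults={'C': -9999., 'D': -99999.})
# stacked['C'].data == [-9999., 100.], and only the first entry is masked.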
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy: + def setup_method(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes unused variable warnings + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1,2,3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), 
('d', int)]) + assert_equal(test, control) + + def test_different_field_order(self): + # gh-8940 + a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) + b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) + # this should not give a FutureWarning: + j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) + assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) + + def test_duplicate_keys(self): + a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) + b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) + assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) + + def test_same_name_different_dtypes_key(self): + a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')]) [...] +IS_64BIT = sys.maxsize > 2**32 + + +def _add_keepdims(func): + """ hack in keepdims behavior into a function taking an axis """ + @functools.wraps(func) + def wrapped(a, axis, **kwargs): + res = func(a, axis=axis, **kwargs) + if axis is None: + axis = 0 # res is now a scalar, so we can insert this anywhere + return np.expand_dims(res, axis=axis) + return wrapped + + +class TestTakeAlongAxis: + def test_argequivalent(self): + """ Test it translates from arg<func> to <func> """ + from numpy.random import rand + a = rand(3, 4, 5) + + funcs = [ + (np.sort, np.argsort, dict()), + (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()), + (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()), + #(np.partition, np.argpartition, dict(kth=2)), + ] + + for func, argfunc, kwargs in funcs: + for axis in list(range(a.ndim)) + [None]: + a_func = func(a, axis=axis, **kwargs) + ai_func = argfunc(a, axis=axis, **kwargs) + assert_equal(a_func, take_along_axis(a, ai_func, axis=axis)) + + def test_invalid(self): + """ Test it errors when indices has too few dimensions """ + a = np.ones((10, 10)) + ai = np.ones((10, 2), dtype=np.intp) + + # sanity check + take_along_axis(a, ai, axis=1) + + # not enough indices + assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1) + # bool arrays not allowed + assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1) + # float arrays not allowed + assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) + # invalid axis + assert_raises(AxisError, take_along_axis, a, ai, axis=10) + + def test_empty(self): + """ Test everything is ok with empty results, even with inserted dims """ + a = np.ones((3, 4, 5)) + ai = np.ones((3, 0, 5), dtype=np.intp) + + actual = take_along_axis(a, ai, axis=1) + assert_equal(actual.shape, ai.shape) + + def test_broadcast(self): + """ Test that non-indexing dimensions are broadcast in both directions """ + a = np.ones((3, 4, 1)) + ai = np.ones((1, 2, 5), dtype=np.intp) + actual = take_along_axis(a, ai, axis=1) + assert_equal(actual.shape, (3, 2, 5)) + + +class TestPutAlongAxis: + def test_replace_max(self): + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + + for axis in list(range(a_base.ndim)) + [None]: + # we mutate this in the loop + a = a_base.copy() + + # replace the max with a small value + i_max = _add_keepdims(np.argmax)(a, axis=axis) + put_along_axis(a, i_max, -99, axis=axis) + + # find the new minimum, which should now sit where the old max was + i_min = _add_keepdims(np.argmin)(a, axis=axis) + + assert_equal(i_min, i_max) + + def test_broadcast(self): + """ Test that non-indexing dimensions are broadcast in both directions """ + a = np.ones((3, 4, 1)) + ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4 + put_along_axis(a, ai, 20, axis=1) + assert_equal(take_along_axis(a, ai, axis=1), 20) + + +class TestApplyAlongAxis: + def test_simple(self): + a = 
np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_simple101(self): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + def test_preserve_subclass(self): + def double(row): + return row * 2 + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) + + result = apply_along_axis(double, 0, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + result = apply_along_axis(double, 1, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + def test_subclass(self): + class MinimalSubclass(np.ndarray): + data = 1 + + def minimal_function(array): + return array.data + + a = np.zeros((6, 3)).view(MinimalSubclass) + + assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:,None]).view(cls) + + a2d = np.arange(6*3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:,i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i,:]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6*5*3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:,None] + return np.ma.masked_where(res%5==0, res) + a = np.arange(6*3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + assert_equal(res.ndim, 3) + assert_array_equal(res[:,:,0].mask, 
f1to2(a[:,0]).mask) + assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) + assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! + for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes: + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims: + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_axis_tuple(self): + a = np.empty((3, 3, 3)) + assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) + assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) + assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) + assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) + + def test_axis_out_of_range(self): + s = (2, 3, 4, 5) + a = np.empty(s) + assert_raises(AxisError, expand_dims, a, -6) + assert_raises(AxisError, expand_dims, a, 5) + + a = np.empty((3, 3, 3)) + assert_raises(AxisError, expand_dims, a, (0, -6)) + assert_raises(AxisError, expand_dims, a, (0, 5)) + + def test_repeated_axis(self): + a = np.empty((3, 3, 3)) + assert_raises(ValueError, expand_dims, a, axis=(1, 1)) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a%3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit: + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), 
np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10), np.array([])] + compare_results(res, desired) + + def test_integer_split_2D_rows(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=0) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + # Same thing for manual splits: + res = array_split(a, [0, 1], axis=0) + tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), + np.array([np.arange(10)])] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + def test_integer_split_2D_cols(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [np.array([np.arange(4), np.arange(4)]), + np.array([np.arange(4, 7), np.arange(4, 7)]), + np.array([np.arange(7, 10), np.arange(7, 10)])] + compare_results(res, desired) + + def test_integer_split_2D_default(self): + """ This will fail if we change default axis + """ + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + # perhaps should check higher dimensions + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_integer_split_2D_rows_greater_max_int32(self): + a = np.broadcast_to([0], (1 << 32, 2)) + res = array_split(a, 4) + chunk = np.broadcast_to([0], (1 << 30, 2)) + tgt = [chunk] * 4 + for i in range(len(tgt)): + assert_equal(res[i].shape, tgt[i].shape) + + def test_index_split_simple(self): + a = np.arange(10) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_low_bound(self): + a = np.arange(10) + indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_high_bound(self): + a = np.arange(10) + indices = [0, 5, 7, 10, 12] + res = array_split(a, indices, axis=-1) 
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10), np.array([]), np.array([])] + compare_results(res, desired) + + +class TestSplit: + # The split function is essentially the same as array_split, + # except that it tests whether splitting will result in an + # equal split; only that case is checked here. + + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestColumnStack: + def test_non_iterable(self): + assert_raises(TypeError, column_stack, 1) + + def test_1D_arrays(self): + # example from docstring + a = np.array((1, 2, 3)) + b = np.array((2, 3, 4)) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_2D_arrays(self): + # same as hstack 2D docstring example + a = np.array([[1], [2], [3]]) + b = np.array([[2], [3], [4]]) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + column_stack((np.arange(3) for _ in range(2))) + + +class TestDstack: + def test_non_iterable(self): + assert_raises(TypeError, dstack, 1) + + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dstack((np.arange(3) for _ in range(2))) + + +# array_split has more comprehensive tests of splitting. +# only simple tests are done on hsplit, vsplit, and dsplit; a short sketch of +# how the split family relates follows the TestDsplit header below. +class TestHsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, hsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit: + # Only testing for integer splits. 
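# A minimal sketch (not part of the committed file) of the relationships the
# classes above rely on: split() is array_split() plus an exact-division
# check, and dsplit/vsplit/hsplit are split() along axis 2/0/1.
import numpy as np

a = np.arange(6)
assert [len(p) for p in np.array_split(a, 4)] == [2, 2, 1, 1]  # uneven is fine
try:
    np.split(a, 4)              # 4 does not divide 6 evenly ...
except ValueError:
    pass                        # ... so split() refuses

b = np.arange(8).reshape(2, 2, 2)
assert all(np.array_equal(x, y)
           for x, y in zip(np.dsplit(b, 2), np.split(b, 2, axis=2)))
assert all(np.array_equal(x, y)
           for x, y in zip(np.vsplit(b, 2), np.split(b, 2, axis=0)))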
+ def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze: + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron: + def test_basic(self): + # Using 0-dimensional ndarray + a = np.array(1) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[1, 2], [3, 4]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array(1) + assert_array_equal(np.kron(a, b), k) + + # Using 1-dimensional ndarray + a = np.array([3]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[3, 6], [9, 12]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([3]) + assert_array_equal(np.kron(a, b), k) + + # Using 3-dimensional ndarray + a = np.array([[[1]], [[2]]]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([[[1]], [[2]]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 1.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), myarray) + assert_equal(type(kron(ma, a)), myarray) + + @pytest.mark.parametrize( + "array_class", [np.asarray, np.asmatrix] + ) + def test_kron_smoke(self, array_class): + a = array_class(np.ones([3, 3])) + b = array_class(np.ones([3, 3])) + k = array_class(np.ones([9, 9])) + + assert_array_equal(np.kron(a, b), k) + + def test_kron_ma(self): + x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + k = np.ma.array(np.diag([1, 4, 4, 16]), + mask=~np.array(np.identity(4), dtype=bool)) + + assert_array_equal(k, np.kron(x, x)) + + @pytest.mark.parametrize( + "shape_a,shape_b", [ + ((1, 1), (1, 1)), + ((1, 2, 3), (4, 5, 6)), + ((2, 2), (2, 2, 2)), + ((1, 0), (1, 1)), + ((2, 0, 2), (2, 2)), + ((2, 0, 0, 2), (2, 0, 2)), + ]) + def test_kron_shape(self, shape_a, shape_b): + a = np.ones(shape_a) + b = np.ones(shape_b) + normalised_shape_a = (1,) * max(0, len(shape_b)-len(shape_a)) + shape_a + normalised_shape_b = (1,) * max(0, len(shape_a)-len(shape_b)) + shape_b + expected_shape = np.multiply(normalised_shape_a, normalised_shape_b) + + k = np.kron(a, b) + assert np.array_equal( + k.shape, expected_shape), "Unexpected shape from kron" + + +class TestTile: + def 
test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory: + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + """Compare lists of arrays.""" + if len(res) != len(desired): + raise ValueError("Iterables have different lengths") + # See also PEP 618 for Python 3.10 + for x, y in zip(res, desired): + assert_array_equal(x, y) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..a603904340aea466cc58c7efbe9892d804e1b9a7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,645 @@ +import numpy as np +from numpy._core._rational_tests import rational +from numpy.testing import ( + assert_equal, assert_array_equal, assert_raises, assert_, + assert_raises_regex, assert_warns, + ) +from numpy.lib._stride_tricks_impl import ( + as_strided, broadcast_arrays, _broadcast_shape, broadcast_to, + broadcast_shapes, sliding_window_view, + ) +import pytest + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. 
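# (Illustration, not part of the file: for shape0=(3, 1) and shape1=(1, 4),
# this helper adds np.zeros((3, 1)) to np.arange(4).reshape(1, 4); the sum has
# shape (3, 4), and broadcast_arrays must return a view of x1 with the same
# values in the same positions, which the assert_array_equal(y, b1) below
# verifies.)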
+ + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. + n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, 'got an unexpected keyword'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. 
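# (For instance, (1,) against (3, 3) and (3, 3) against (1,) must both give
# (3, 3): the broadcast result cannot depend on argument order.)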
+ assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + # plain case (the helper takes no message argument: a third positional + # would be interpreted as the `transposed` flag, so none is passed) + assert_same_as_ufunc(input_shapes[0], input_shapes[1]) + # Reverse the input shapes since broadcasting should be symmetric. + assert_same_as_ufunc(input_shapes[1], input_shapes[0]) + # Try them transposed, too. + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) + # ... and flipped for non-rank-0 inputs in order to test negative + # strides. 
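# (x[::-1] is a view with negative strides, so the flipped variants exercise
# non-contiguous memory layouts; rank-0 shapes are skipped since a 0-d array
# cannot be sliced with [::-1].)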
+ if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + + +def test_broadcast_to_succeeds(): + data = [ + [np.array(0), (0,), np.array(0)], + [np.array(0), (1,), np.zeros(1)], + [np.array(0), (3,), np.zeros(3)], + [np.ones(1), (1,), np.ones(1)], + [np.ones(1), (2,), np.ones(2)], + [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], + [np.arange(3), (3,), np.arange(3)], + [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], + [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], + # test if shape is not a tuple + [np.ones(0), 0, np.ones(0)], + [np.ones(1), 1, np.ones(1)], + [np.ones(1), 2, np.ones(2)], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs (see test_same_as_ufunc above) + [np.ones(1), (0,), np.ones(0)], + [np.ones((1, 2)), (0, 2), np.ones((0, 2))], + [np.ones((2, 1)), (2, 0), np.ones((2, 0))], + ] + for input_array, shape, expected in data: + actual = broadcast_to(input_array, shape) + assert_array_equal(expected, actual) + + +def test_broadcast_to_raises(): + data = [ + [(0,), ()], + [(1,), ()], + [(3,), ()], + [(3,), (1,)], + [(3,), (2,)], + [(3,), (4,)], + [(1, 2), (2, 1)], + [(1, 1), (1,)], + [(1,), -1], + [(1,), (-1,)], + [(1, 2), (-1, 2)], + ] + for orig_shape, target_shape in data: + arr = np.zeros(orig_shape) + assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) + + +def test_broadcast_shape(): + # tests internal _broadcast_shape + # _broadcast_shape is already exercised indirectly by broadcast_arrays + # _broadcast_shape is also exercised by the public broadcast_shapes function + assert_equal(_broadcast_shape(), ()) + assert_equal(_broadcast_shape([1, 2]), (2,)) + assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) + assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_broadcast_shapes_succeeds(): + # tests public broadcast_shapes + data = [ + [[], ()], + [[()], ()], + [[(7,)], (7,)], + [[(1, 2), (2,)], (1, 2)], + [[(1, 1)], (1, 1)], + [[(1, 1), (3, 4)], (3, 4)], + [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)], + [[(5, 6, 1)], (5, 6, 1)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + [[(1,), (3,)], (3,)], + [[2, (3, 2)], (3, 2)], + ] + for input_shapes, target_shape in data: + assert_equal(broadcast_shapes(*input_shapes), target_shape) + + assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2)) + assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,)) + + +def test_broadcast_shapes_raises(): + # tests public broadcast_shapes + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), 
(4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3,1), (3,2), (10, 5)], + [2, (2, 3)], + ] + for input_shapes in data: + assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes)) + + bad_args = [(2,)] * 32 + [(3,)] * 32 + assert_raises(ValueError, lambda: broadcast_shapes(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None]*4]*3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + + +class TestSlidingWindowView: + def test_1d(self): + arr = np.arange(5) + arr_view = sliding_window_view(arr, 2) + expected = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4]]) + assert_array_equal(arr_view, expected) + + def test_2d(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + shape = (2, 2) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1], [10, 11]], + [[1, 2], [11, 12]], + [[2, 3], [12, 13]]], + [[[10, 11], [20, 21]], + [[11, 12], [21, 22]], + [[12, 13], [22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_with_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + arr_view = sliding_window_view(arr, 3, 0) + expected = np.array([[[0, 10, 20], + [1, 11, 21], + [2, 12, 22], + [3, 13, 23]]]) + assert_array_equal(arr_view, expected) + + def test_2d_repeated_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + arr_view = sliding_window_view(arr, (2, 3), (1, 1)) + expected = np.array([[[[0, 1, 2], + [1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_without_axis(self): + i, j = np.ogrid[:4, :4] + arr = 10*i + j + shape = (2, 3) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1, 2], [10, 11, 12]], + [[1, 2, 3], [11, 12, 13]]], + [[[10, 11, 12], [20, 21, 22]], + [[11, 12, 13], [21, 22, 23]]], + [[[20, 21, 22], [30, 31, 32]], + [[21, 22, 23], [31, 32, 33]]]]) + assert_array_equal(arr_view, expected) + + def test_errors(self): + i, j = np.ogrid[:4, :4] + arr = 10*i + j + with pytest.raises(ValueError, match='cannot contain 
negative values'): + sliding_window_view(arr, (-1, 3)) + with pytest.raises( + ValueError, + match='must provide window_shape for all dimensions of `x`'): + sliding_window_view(arr, (1,)) + with pytest.raises( + ValueError, + match='Must provide matching length window_shape and axis'): + sliding_window_view(arr, (1, 3, 4), axis=(0, 1)) + with pytest.raises( + ValueError, + match='window shape cannot be larger than input array'): + sliding_window_view(arr, (5, 5)) + + def test_writeable(self): + arr = np.arange(5) + view = sliding_window_view(arr, 2, writeable=False) + assert_(not view.flags.writeable) + with pytest.raises( + ValueError, + match='assignment destination is read-only'): + view[0, 0] = 3 + view = sliding_window_view(arr, 2, writeable=True) + assert_(view.flags.writeable) + view[0, 1] = 3 + assert_array_equal(arr, np.array([0, 3, 2, 3, 4])) + + def test_subok(self): + class MyArray(np.ndarray): + pass + + arr = np.arange(5).view(MyArray) + assert_(not isinstance(sliding_window_view(arr, 2, + subok=False), + MyArray)) + assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray)) + # Default behavior + assert_(not isinstance(sliding_window_view(arr, 2), MyArray)) + + +def as_strided_writeable(): + arr = np.ones(10) + view = as_strided(arr, writeable=False) + assert_(not view.flags.writeable) + + # Check that writeable also is fine: + view = as_strided(arr, writeable=True) + assert_(view.flags.writeable) + view[...] = 3 + assert_array_equal(arr, np.full_like(arr, 3)) + + # Test that things do not break down for readonly: + arr.flags.writeable = False + view = as_strided(arr, writeable=False) + view = as_strided(arr, writeable=True) + assert_(not view.flags.writeable) + + +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, subok=True, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + self = np.array(*args, subok=True, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + # and for broadcast_to + shape = (2, 4) + a_view = broadcast_to(a, shape) + assert_(type(a_view) is np.ndarray) + assert_(a_view.shape == shape) + a_view = broadcast_to(a, shape, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(a_view.shape == shape) + + +def 
test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable, to + # preserve backwards compatibility + for is_broadcast, results in [(False, broadcast_arrays(original,)), + (True, broadcast_arrays(0, original))]: + for result in results: + # This will change to False in a future version + if is_broadcast: + with assert_warns(FutureWarning): + assert_equal(result.flags.writeable, True) + with assert_warns(DeprecationWarning): + result[:] = 0 + # Warning not emitted, writing to the array resets it + assert_equal(result.flags.writeable, True) + else: + # No warning: + assert_equal(result.flags.writeable, True) + + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + # resets the warn_on_write DeprecationWarning + result.flags.writeable = True + # check: no warning emitted + assert_equal(result.flags.writeable, True) + result[:] = 0 + + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_writeable_memoryview(): + # The result of broadcast_arrays exports as a non-writeable memoryview + # because otherwise there is no good way to opt in to the new behaviour + # (i.e. you would need to set writeable to False explicitly). + # See gh-13929. + original = np.array([1, 2, 3]) + + for is_broadcast, results in [(False, broadcast_arrays(original,)), + (True, broadcast_arrays(0, original))]: + for result in results: + # This will change to False in a future version + if is_broadcast: + # memoryview(result, writable=True) will give warning but cannot + # be tested using the python API. 
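+                # (a writable-buffer request goes through the C buffer
+                # protocol, PyObject_GetBuffer with PyBUF_WRITABLE; the
+                # Python-level memoryview() constructor has no writable flag)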
+ assert memoryview(result).readonly + else: + assert not memoryview(result).readonly + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_twodim_base.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 0000000000000000000000000000000000000000..37d0c7306696f584dac39afda6431cbc7e399af5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,541 @@ +"""Test functions for matrix module + +""" +from numpy.testing import ( + assert_equal, assert_array_equal, assert_array_max_ulp, + assert_array_almost_equal, assert_raises, assert_ +) +from numpy import ( + arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, + tri, mask_indices, triu_indices, triu_indices_from, tril_indices, + tril_indices_from, vander, +) +import numpy as np + +import pytest + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye: + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_uint64(self): + # Regression test for gh-9982 + assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]])) + assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)), + array([[0, 1, 0, 0], [0, 0, 1, 0]])) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag: + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = 
zeros((7, 7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr: + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud: + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d: + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer/8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]])/9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + def test_binparameter_combination(self): + x = array( + [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, + 0.59944483, 1]) + y = array( + [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, + 0.15886423, 1]) + edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) + H, xe, ye = histogram2d(x, y, (edges, 4)) + answer = array( + [[2., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 1., 0., 0.], + [1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) + H, xe, ye = histogram2d(x, y, (4, edges)) + answer = array( + [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) + + def test_dispatch(self): + class ShouldDispatch: + def __array_function__(self, function, types, args, kwargs): + return types, args, kwargs + + xy = [1, 2] + s_d = ShouldDispatch() + r = histogram2d(s_d, xy) + # Cannot use assert_equal since that dispatches... + assert_(r == ((ShouldDispatch,), (s_d, xy), {})) + r = histogram2d(xy, s_d) + assert_(r == ((ShouldDispatch,), (xy, s_d), {})) + r = histogram2d(xy, xy, bins=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=s_d))) + r = histogram2d(xy, xy, bins=[s_d, 5]) + assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5]))) + assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) + r = histogram2d(xy, xy, weights=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d))) + + @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)]) + def test_bad_length(self, x_len, y_len): + x, y = np.ones(x_len), np.ones(y_len) + with pytest.raises(ValueError, + match='x and y must have the same length.'): + histogram2d(x, y) + + +class TestTri: + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + assert_array_equal(b, [[1, 0], [1, 1]]) + assert_array_equal(c, b.T) + # should return the same dtype as the original array + assert_equal(b.dtype, a.dtype) + assert_equal(c.dtype, a.dtype) + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + assert_array_equal(a_triu_observed, a_triu_desired) + assert_array_equal(a_tril_observed, a_tril_desired) + assert_equal(a_triu_observed.dtype, a.dtype) + assert_equal(a_tril_observed.dtype, 
a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3, 3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices: + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) 
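+
+# Illustrative sketch, not part of the upstream NumPy suite: the (row, col)
+# index arrays from triu_indices and tril_indices together cover every entry
+# of a square matrix (overlapping only on the diagonal), which is the
+# property the fancy-indexing assertions above rely on.
+def test_triu_tril_indices_cover_matrix():
+    a = np.arange(16).reshape(4, 4)
+    rebuilt = np.zeros_like(a)
+    iu, il = triu_indices(4), tril_indices(4)
+    rebuilt[iu] = np.triu(a)[iu]
+    rebuilt[il] = np.tril(a)[il]  # the diagonal is written by both sets
+    assert_array_equal(rebuilt, a)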
+ + +class TestTrilIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander: + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m-n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0+1j, 1.0-1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1+1j, 1], + [-2j, 1-1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_type_check.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_type_check.py new file mode 100644 index 0000000000000000000000000000000000000000..32207160e1ac0f34b9048f5b172cf4f7d8045517 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_type_check.py @@ -0,0 +1,465 @@ +import numpy as np +from numpy import ( + common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, + nan_to_num, isrealobj, iscomplexobj, real_if_close + ) +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises + ) + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType: + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.complex64) + acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.complex128) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.complex64) + assert_(common_type(acd) == np.complex128) + + +class TestMintypecode: + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype+'f'), 'f') + assert_equal(mintypecode(itype+'d'), 'd') + assert_equal(mintypecode(itype+'F'), 'F') + assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + 
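+        # mixing real and complex codes of different precisions promotes to
+        # the wider complex type, e.g. 'd' with 'F' must give 'D':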
#assert_equal(mintypecode('dF',savespace=1),'F') + assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar: + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(4.0)) + + +class TestReal: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex: + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.any(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal: + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj: + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1+0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1+0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom 
np.dtype duck-typed class, such as used by pandas
+        # (pandas.core.dtypes)
+        class PdComplex(np.complex128):
+            pass
+        class PdDtype:
+            name = 'category'
+            names = None
+            type = PdComplex
+            kind = 'c'
+            str = '<c16'
+            base = np.dtype('complex128')
+        class DummyPd:
+            @property
+            def dtype(self):
+                return PdDtype
+        dummy = DummyPd()
+        assert_(iscomplexobj(dummy))
+
+
+class TestNanToNum:
+
+    def test_generic(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = nan_to_num(np.array((-1., 0, 1))/0.)
+        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same tests but with nan, posinf and neginf keywords
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = nan_to_num(np.array((-1., 0, 1))/0.,
+                              nan=10, posinf=20, neginf=30)
+        assert_equal(vals, [30, 10, 20])
+        assert_all(np.isfinite(vals[[0, 2]]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same test but in-place
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = np.array((-1., 0, 1))/0.
+            result = nan_to_num(vals, copy=False)
+
+        assert_(result is vals)
+        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same test but in-place
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = np.array((-1., 0, 1))/0.
+            result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
+
+        assert_(result is vals)
+        assert_equal(vals, [30, 10, 20])
+        assert_all(np.isfinite(vals[[0, 2]]))
+        assert_equal(type(vals), np.ndarray)
+
+    def test_array(self):
+        vals = nan_to_num([1])
+        assert_array_equal(vals, np.array([1], int))
+        assert_equal(type(vals), np.ndarray)
+        vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
+        assert_array_equal(vals, np.array([1], int))
+        assert_equal(type(vals), np.ndarray)
+
+    def test_integer(self):
+        vals = nan_to_num(1)
+        assert_all(vals == 1)
+        assert_equal(type(vals), np.int_)
+        vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1)
+        assert_equal(type(vals), np.int_)
+
+    def test_float(self):
+        vals = nan_to_num(1.0)
+        assert_all(vals == 1.0)
+        assert_equal(type(vals), np.float64)
+        vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1.1)
+        assert_equal(type(vals), np.float64)
+
+    def test_complex_good(self):
+        vals = nan_to_num(1+1j)
+        assert_all(vals == 1+1j)
+        assert_equal(type(vals), np.complex128)
+        vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1+1j)
+        assert_equal(type(vals), np.complex128)
+
+    def test_complex_bad(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            v = 1 + 1j
+            v += np.array(0+1.j)/0.
+        vals = nan_to_num(v)
+        # !! This is actually (unexpectedly) zero
+        assert_all(np.isfinite(vals))
+        assert_equal(type(vals), np.complex128)
+
+    def test_complex_bad2(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            v = 1 + 1j
+            v += np.array(-1+1.j)/0.
+        vals = nan_to_num(v)
+        assert_all(np.isfinite(vals))
+        assert_equal(type(vals), np.complex128)
+        # Fixme
+        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
+        # !! This is actually (unexpectedly) positive
+        # !! inf. Comment out for now, and see if it
+        # !! changes
+        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
+
+    def test_do_not_rewrite_previous_keyword(self):
+        # This is done to test that when, for instance, nan=np.inf then these
+        # values are not rewritten by posinf keyword to the posinf value.
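+        # Concretely: for input [-inf, nan, inf] with nan=np.inf and
+        # posinf=999, the NaN is replaced by inf and must then stay inf
+        # rather than being rewritten to 999 by the posinf replacement.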
+ with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) + + +class TestRealIfClose: + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a+1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a+1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a+1e-7j, tol=1e-6) + assert_all(isrealobj(b)) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_ufunclike.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 0000000000000000000000000000000000000000..3443f18f59af0c93efaac9b2ffd66e70c69213b0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,100 @@ +import numpy as np + +from numpy import fix, isposinf, isneginf +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises +) + + +class TestUfunclike: + + def test_isposinf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([True, False, False, False, False, False]) + + res = isposinf(a) + assert_equal(res, tgt) + res = isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isposinf(a) + + def test_isneginf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([False, True, False, False, False, False]) + + res = isneginf(a) + assert_equal(res, tgt) + res = isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isneginf(a) + + def test_fix(self): + a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = np.zeros(a.shape, float) + tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = fix(a) + assert_equal(res, tgt) + res = fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(np.ndarray): + def __new__(cls, data, metadata=None): + res = np.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None, return_scalar=False): + if not isinstance(obj, MyArray): + obj = obj.view(MyArray) + if obj.metadata is None: + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = np.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = fix(m) + assert_array_equal(f, np.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0,...] 
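+        # (indexing with an explicit ellipsis yields a 0d MyArray view, not
+        # a scalar, so an attribute can still be attached to it)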
+ m0d.metadata = 'bar' + f0d = fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + def test_scalar(self): + x = np.inf + actual = np.isposinf(x) + expected = np.True_ + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + x = -3.4 + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) diff --git a/phivenv/Lib/site-packages/numpy/lib/tests/test_utils.py b/phivenv/Lib/site-packages/numpy/lib/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7ceb67fa1c16908fdc1b61e73b750ff6a7f7f10e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/lib/tests/test_utils.py @@ -0,0 +1,80 @@ +import pytest + +import numpy as np +from numpy.testing import assert_raises_regex +import numpy.lib._utils_impl as _utils_impl + +from io import StringIO + + +def test_assert_raises_regex_context_manager(): + with assert_raises_regex(ValueError, 'no deprecation warning'): + raise ValueError('no deprecation warning') + + +def test_info_method_heading(): + # info(class) should only print "Methods:" heading if methods exist + + class NoPublicMethods: + pass + + class WithPublicMethods: + def first_method(): + pass + + def _has_method_heading(cls): + out = StringIO() + np.info(cls, output=out) + return 'Methods:' in out.getvalue() + + assert _has_method_heading(WithPublicMethods) + assert not _has_method_heading(NoPublicMethods) + + +def test_drop_metadata(): + def _compare_dtypes(dt1, dt2): + return np.can_cast(dt1, dt2, casting='no') + + # structured dtype + dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])], + metadata={'msg': 'titi'}) + dt_m = _utils_impl.drop_metadata(dt) + assert _compare_dtypes(dt, dt_m) is True + assert dt_m.metadata is None + assert dt_m['l1'].metadata is None + assert dt_m['l1']['l2'].metadata is None + + # alignment + dt = np.dtype([('x', '>> from numpy import linalg as LA + >>> LA.inv(np.zeros((2,2))) + Traceback (most recent call last): + File "", line 1, in + File "...linalg.py", line 350, + in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) + File "...linalg.py", line 249, + in solve + raise LinAlgError('Singular matrix') + numpy.linalg.LinAlgError: Singular matrix + + """ + + +def _raise_linalgerror_singular(err, flag): + raise LinAlgError("Singular matrix") + +def _raise_linalgerror_nonposdef(err, flag): + raise LinAlgError("Matrix is not positive definite") + +def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): + raise LinAlgError("Eigenvalues did not converge") + +def _raise_linalgerror_svd_nonconvergence(err, flag): + raise LinAlgError("SVD did not converge") + +def _raise_linalgerror_lstsq(err, flag): + raise LinAlgError("SVD did not converge in Linear Least Squares") + +def _raise_linalgerror_qr(err, flag): + raise LinAlgError("Incorrect argument found while performing " + "QR factorization") + + +def _makearray(a): + new = asarray(a) + wrap = getattr(a, "__array_wrap__", new.__array_wrap__) + return new, wrap + +def isComplexType(t): + return issubclass(t, complexfloating) + + +_real_types_map = {single: single, + double: double, + csingle: single, + cdouble: double} + +_complex_types_map = {single: csingle, + double: cdouble, + csingle: csingle, + cdouble: cdouble} + +def _realType(t, default=double): + return _real_types_map.get(t, default) + +def _complexType(t, default=cdouble): + return 
_complex_types_map.get(t, default) + +def _commonType(*arrays): + # in lite version, use higher precision (always double or cdouble) + result_type = single + is_complex = False + for a in arrays: + type_ = a.dtype.type + if issubclass(type_, inexact): + if isComplexType(type_): + is_complex = True + rt = _realType(type_, default=None) + if rt is double: + result_type = double + elif rt is None: + # unsupported inexact scalar + raise TypeError("array type %s is unsupported in linalg" % + (a.dtype.name,)) + else: + result_type = double + if is_complex: + result_type = _complex_types_map[result_type] + return cdouble, result_type + else: + return double, result_type + + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % a.ndim) + +def _assert_stacked_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assert_stacked_square(*arrays): + for a in arrays: + m, n = a.shape[-2:] + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assert_finite(*arrays): + for a in arrays: + if not isfinite(a).all(): + raise LinAlgError("Array must not contain infs or NaNs") + +def _is_empty_2d(arr): + # check size first for efficiency + return arr.size == 0 and prod(arr.shape[-2:]) == 0 + + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=x.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). 
+ + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> b = np.random.randn(2*3, 4) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(0, an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an-b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + if a.size != prod ** 2: + raise LinAlgError( + "Input arrays must satisfy the requirement \ + prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])" + ) + + a = a.reshape(prod, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is (..., M) if b is + shape (M,) and (..., M, K) if b is (..., M, K), where the "..." part is + broadcasted between a and b. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + See Also + -------- + scipy.linalg.solve : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine ``_gesv``. + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + .. versionchanged:: 2.0 + + The b array is only treated as a shape (M,) column vector if it is + exactly 1-dimensional. In all other instances it is treated as a stack + of (M, K) matrices. Previously b would be treated as a stack of (M,) + vectors if b.ndim was equal to a.ndim - 1. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. 
+ + Examples + -------- + Solve the system of equations: + ``x0 + 2 * x1 = 1`` and + ``3 * x0 + 5 * x1 = 2``: + + >>> a = np.array([[1, 2], [3, 5]]) + >>> b = np.array([1, 2]) + >>> x = np.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, b, signature=signature) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> b = np.random.randn(4, 6) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> b = np.random.randn(24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``a @ ainv = ainv @ a = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. + + Returns + ------- + ainv : (..., M, M) ndarray or matrix + Inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + See Also + -------- + scipy.linalg.inv : Similar function in SciPy. + numpy.linalg.cond : Compute the condition number of a matrix. + numpy.linalg.svd : Compute the singular value decomposition of a matrix. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. 
+ + If `a` is detected to be singular, a `LinAlgError` is raised. If `a` is + ill-conditioned, a `LinAlgError` may or may not be raised, and results may + be inaccurate due to floating-point errors. + + References + ---------- + .. [1] Wikipedia, "Condition number", + https://en.wikipedia.org/wiki/Condition_number + + Examples + -------- + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(a @ ainv, np.eye(2)) + True + >>> np.allclose(ainv @ a, np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) + + If a matrix is close to singular, the computed inverse may not satisfy + ``a @ ainv = ainv @ a = eye(a.shape[0])`` even if a `LinAlgError` + is not raised: + + >>> a = np.array([[2,4,6],[2,0,2],[6,8,14]]) + >>> inv(a) # No errors raised + array([[-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [ 1.12589991e+15, 5.62949953e+14, -5.62949953e+14]]) + >>> a @ inv(a) + array([[ 0. , -0.5 , 0. ], # may vary + [-0.5 , 0.625, 0.25 ], + [ 0. , 0. , 1. ]]) + + To detect ill-conditioned matrices, you can use `numpy.linalg.cond` to + compute its *condition number* [1]_. The larger the condition number, the + more ill-conditioned the matrix is. As a rule of thumb, if the condition + number ``cond(a) = 10**k``, then you may lose up to ``k`` digits of + accuracy on top of what would be lost to the numerical method due to loss + of precision from arithmetic methods. + + >>> from numpy.linalg import cond + >>> cond(a) + np.float64(8.659885634118668e+17) # may vary + + It is also possible to detect ill-conditioning by inspecting the matrix's + singular values directly. The ratio between the largest and the smallest + singular value is the condition number: + + >>> from numpy.linalg import svd + >>> sigma = svd(a, compute_uv=False) # Do not compute singular vectors + >>> sigma.max()/sigma.min() + 8.659885634118668e+17 # may vary + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + ainv = _umath_linalg.inv(a, signature=signature) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered". + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. 
+ + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + + try: + n = operator.index(n) + except TypeError as e: + raise TypeError("exponent must be an integer") from e + + # Fall back on dot for object arrays. Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + +def _cholesky_dispatcher(a, /, *, upper=None): + return (a,) + + +@array_function_dispatch(_cholesky_dispatcher) +def cholesky(a, /, *, upper=False): + """ + Cholesky decomposition. + + Return the lower or upper Cholesky decomposition, ``L * L.H`` or + ``U.H * U``, of the square matrix ``a``, where ``L`` is lower-triangular, + ``U`` is upper-triangular, and ``.H`` is the conjugate transpose operator + (which is the ordinary transpose if ``a`` is real-valued). ``a`` must be + Hermitian (symmetric if real-valued) and positive-definite. No checking is + performed to verify whether ``a`` is Hermitian or not. In addition, only + the lower or upper-triangular and diagonal elements of ``a`` are used. + Only ``L`` or ``U`` is actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + upper : bool + If ``True``, the result must be the upper-triangular Cholesky factor. + If ``False``, the result must be the lower-triangular Cholesky factor. + Default: ``False``. 
+ + Returns + ------- + L : (..., M, M) array_like + Lower or upper-triangular Cholesky factor of `a`. Returns a matrix + object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the decomposition fails, for example, if `a` is not + positive-definite. + + See Also + -------- + scipy.linalg.cholesky : Similar function in SciPy. + scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian + positive-definite matrix. + scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in + `scipy.linalg.cho_solve`. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The Cholesky decomposition is often used as a fast way of solving + + .. math:: A \\mathbf{x} = \\mathbf{b} + + (when `A` is both Hermitian/symmetric and positive-definite). + + First, we solve for :math:`\\mathbf{y}` in + + .. math:: L \\mathbf{y} = \\mathbf{b}, + + and then for :math:`\\mathbf{x}` in + + .. math:: L^{H} \\mathbf{x} = \\mathbf{y}. + + Examples + -------- + >>> A = np.array([[1,-2j],[2j,5]]) + >>> A + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> L = np.linalg.cholesky(A) + >>> L + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> np.dot(L, L.T.conj()) # verify that L * L.H = A + array([[1.+0.j, 0.-2.j], + [0.+2.j, 5.+0.j]]) + >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? + >>> np.linalg.cholesky(A) # an ndarray object is returned + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> # But a matrix object is returned if A is a matrix object + >>> np.linalg.cholesky(np.matrix(A)) + matrix([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> # The upper-triangular Cholesky factor can also be obtained. + >>> np.linalg.cholesky(A, upper=True) + array([[1.-0.j, 0.-2.j], + [0.-0.j, 1.-0.j]]) + + """ + gufunc = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_nonposdef, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, signature=signature) + return wrap(r.astype(result_t, copy=False)) + + +# outer product + + +def _outer_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_outer_dispatcher) +def outer(x1, x2, /): + """ + Compute the outer product of two vectors. + + This function is Array API compatible. Compared to ``np.outer`` + it accepts 1-dimensional inputs only. + + Parameters + ---------- + x1 : (M,) array_like + One-dimensional input array of size ``N``. + Must have a numeric data type. + x2 : (N,) array_like + One-dimensional input array of size ``M``. + Must have a numeric data type. + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + outer + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + if x1.ndim != 1 or x2.ndim != 1: + raise ValueError( + "Input arrays must be one-dimensional, but they are " + f"{x1.ndim=} and {x2.ndim=}." + ) + return _core_outer(x1, x2, out=None) + + +# QR decomposition + + +def _qr_dispatcher(a, mode=None): + return (a,) + + +@array_function_dispatch(_qr_dispatcher) +def qr(a, mode='reduced'): + """ + Compute the qr factorization of a matrix. + + Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is + upper-triangular. 
+ + Parameters + ---------- + a : array_like, shape (..., M, N) + An array-like object with the dimensionality of at least 2. + mode : {'reduced', 'complete', 'r', 'raw'}, optional, default: 'reduced' + If K = min(M, N), then + + * 'reduced' : returns Q, R with dimensions (..., M, K), (..., K, N) + * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N) + * 'r' : returns R only with dimensions (..., K, N) + * 'raw' : returns h, tau with dimensions (..., N, M), (..., K,) + + The options 'reduced', 'complete', and 'raw' are new in numpy 1.8, + see the notes for more information. The default is 'reduced', and to + maintain backward compatibility with earlier versions of numpy both + it and the old default 'full' can be omitted. Note that array h + returned in 'raw' mode is transposed for calling Fortran. The + 'economic' mode is deprecated. The modes 'full' and 'economic' may + be passed using only the first letter for backwards compatibility, + but all others must be spelled out. See the Notes for more + explanation. + + Returns + ------- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes `Q` and `R`. + + Q : ndarray of float or complex, optional + A matrix with orthonormal columns. When mode = 'complete' the + result is an orthogonal/unitary matrix depending on whether or not + a is real/complex. The determinant may be either +/- 1 in that + case. In case the number of dimensions in the input array is + greater than 2 then a stack of the matrices with above properties + is returned. + R : ndarray of float or complex, optional + The upper-triangular matrix or a stack of upper-triangular + matrices if the number of dimensions in the input array is greater + than 2. + (h, tau) : ndarrays of np.double or np.cdouble, optional + The array h contains the Householder reflectors that generate q + along with r. The tau array contains scaling factors for the + reflectors. In the deprecated 'economic' mode only h is returned. + + Raises + ------ + LinAlgError + If factoring fails. + + See Also + -------- + scipy.linalg.qr : Similar function in SciPy. + scipy.linalg.rq : Compute RQ decomposition of a matrix. + + Notes + ----- + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, + ``dorgqr``, and ``zungqr``. + + For more information on the qr factorization, see for example: + https://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. + + New 'reduced', 'complete', and 'raw' options for mode were added in + NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work.
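+
+    As a quick, minimal illustration of the shapes produced by the two
+    Q-returning modes (the matrix entries below are random; only the
+    shapes are deterministic):
+
+    >>> a = np.random.randn(5, 3)
+    >>> Q, R = np.linalg.qr(a, mode='reduced')
+    >>> Q.shape, R.shape
+    ((5, 3), (3, 3))
+    >>> Q, R = np.linalg.qr(a, mode='complete')
+    >>> Q.shape, R.shape
+    ((5, 5), (5, 3))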
+ + Examples + -------- + >>> a = np.random.randn(9, 6) + >>> Q, R = np.linalg.qr(a) + >>> np.allclose(a, np.dot(Q, R)) # a does equal QR + True + >>> R2 = np.linalg.qr(a, mode='r') + >>> np.allclose(R, R2) # mode='r' returns the same R as mode='full' + True + >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input + >>> Q, R = np.linalg.qr(a) + >>> Q.shape + (3, 2, 2) + >>> R.shape + (3, 2, 2) + >>> np.allclose(a, np.matmul(Q, R)) + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = QR such that Q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(R) * (Q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) + + >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) + >>> A + array([[0, 1], + [1, 1], + [1, 1], + [2, 1]]) + >>> b = np.array([1, 2, 2, 3]) + >>> Q, R = np.linalg.qr(A) + >>> p = np.dot(Q.T, b) + >>> np.dot(np.linalg.inv(R), p) + array([ 1., 1.]) + + """ + if mode not in ('reduced', 'complete', 'r', 'raw'): + if mode in ('f', 'full'): + # 2013-04-01, 1.8 + msg = "".join(( + "The 'full' option is deprecated in favor of 'reduced'.\n", + "For backward compatibility let mode default.")) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'reduced' + elif mode in ('e', 'economic'): + # 2013-04-01, 1.8 + msg = "The 'economic' option is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'economic' + else: + raise ValueError(f"Unrecognized mode '{mode}'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + m, n = a.shape[-2:] + t, result_t = _commonType(a) + a = a.astype(t, copy=True) + a = _to_native_byte_order(a) + mn = min(m, n) + + if m <= n: + gufunc = _umath_linalg.qr_r_raw_m + else: + gufunc = _umath_linalg.qr_r_raw_n + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + tau = gufunc(a, signature=signature) + + # handle modes that don't return q + if mode == 'r': + r = triu(a[..., :mn, :]) + r = r.astype(result_t, copy=False) + return wrap(r) + + if mode == 'raw': + q = transpose(a) + q = q.astype(result_t, copy=False) + tau = tau.astype(result_t, copy=False) + return wrap(q), tau + + if mode == 'economic': + a = a.astype(result_t, copy=False) + return wrap(a) + + # mc is the number of columns in the resulting q + # matrix. If the mode is complete then it is + # same as number of rows, and if the mode is reduced, + # then it is the minimum of number of rows and columns. 
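+    # For instance, a 5 x 3 input gives mc = 5 in 'complete' mode
+    # (q: (..., 5, 5), r: (..., 5, 3)) and mc = 3 in 'reduced' mode
+    # (q: (..., 5, 3), r: (..., 3, 3)).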
+ if mode == 'complete' and m > n: + mc = m + gufunc = _umath_linalg.qr_complete + else: + mc = mn + gufunc = _umath_linalg.qr_reduced + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + q = gufunc(a, tau, signature=signature) + r = triu(a[..., :mc, :]) + + q = q.astype(result_t, copy=False) + r = r.astype(result_t, copy=False) + + return QRResult(wrap(q), wrap(r)) + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + scipy.linalg.eigvals : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by ``Q`` on one side and + by ``Q.T`` on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) # random + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->D' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = _umath_linalg.eigvals(a, signature=signature) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. 
+ + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + scipy.linalg.eigvalsh : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) # may vary + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa; wb + array([1., 6.]) + array([6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = gufunc(a, signature=signature) + return w.astype(_realType(result_t), copy=False) + +def _convertarray(a): + t, result_t = _commonType(a) + a = a.astype(t).T.copy() + return a, t, result_t + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. 
When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + eigenvectors : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``eigenvectors[:,i]`` is the eigenvector corresponding to the + eigenvalue ``eigenvalues[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + scipy.linalg.eig : Similar function in SciPy that also solves the + generalized eigenvalue problem. + scipy.linalg.schur : Best choice for unitary and other non-Hermitian + normal matrices. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector `v` such + that ``a @ v = w * v``. Thus, the arrays `a`, `eigenvalues`, and + `eigenvectors` satisfy the equations ``a @ eigenvectors[:,i] = + eigenvalues[i] * eigenvectors[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. + + The array `eigenvectors` may not be of maximum rank, that is, some of the + columns may be linearly dependent, although round-off error may obscure + that fact. If the eigenvalues are all different, then theoretically the + eigenvectors are linearly independent and `a` can be diagonalized by a + similarity transformation using `eigenvectors`, i.e, ``inv(eigenvectors) @ + a @ eigenvectors`` is diagonal. + + For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur` + is preferred because the matrix `eigenvectors` is guaranteed to be + unitary, which is not the case when using `eig`. The Schur factorization + produces an upper triangular matrix rather than a diagonal matrix, but for + normal matrices only the diagonal of the upper triangular matrix is + needed, the rest is roundoff error. + + Finally, it is emphasized that `eigenvectors` consists of the *right* (as + in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``y.T @ a + = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, + and, in general, the left and right eigenvectors of a matrix are not + necessarily the (perhaps conjugate) transposes of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> from numpy import linalg as LA + + (Almost) trivial example with real eigenvalues and eigenvectors. + + >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) + >>> eigenvalues + array([1., 2., 3.]) + >>> eigenvectors + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Real matrix possessing complex eigenvalues and eigenvectors; + note that the eigenvalues are complex conjugates of each other. + + >>> eigenvalues, eigenvectors = LA.eig(np.array([[1, -1], [1, 1]])) + >>> eigenvalues + array([1.+1.j, 1.-1.j]) + >>> eigenvectors + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + Complex-valued matrix with real eigenvalues (but complex-valued + eigenvectors); note that ``a.conj().T == a``, i.e., `a` is Hermitian. 
+ + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([2.+0.j, 0.+0.j]) + >>> eigenvectors + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. eigenvalues are 1 +/- 1e-9 + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([1., 1.]) + >>> eigenvectors + array([[1., 0.], + [0., 1.]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->DD' if isComplexType(t) else 'd->DD' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = _umath_linalg.eig(a, signature=signature) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return EigResult(w.astype(result_t, copy=False), wrap(vt)) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). + + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``eigenvectors[:, i]`` is the normalized eigenvector + corresponding to the eigenvalue ``eigenvalues[i]``. Will return a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + scipy.linalg.eigh : Similar function in SciPy (but also solves the + generalized eigenvalue problem). + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. + + The eigenvalues of real symmetric or complex Hermitian matrices are always + real. [1]_ The array `eigenvalues` of (column) eigenvectors is unitary and + `a`, `eigenvalues`, and `eigenvectors` satisfy the equations ``dot(a, + eigenvectors[:, i]) = eigenvalues[i] * eigenvectors[:, i]``. + + References + ---------- + .. [1] G. 
Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(a) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> (np.dot(a, eigenvectors[:, 0]) - + ... eigenvalues[0] * eigenvectors[:, 0]) # verify 1st eigenval/vec pair + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) + >>> (np.dot(a, eigenvectors[:, 1]) - + ... eigenvalues[1] * eigenvectors[:, 1]) # verify 2nd eigenval/vec pair + array([0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(A) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa; wb + array([1., 6.]) + array([6.+0.j, 1.+0.j]) + >>> va; vb + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. +0.4472136j, 0.89442719+0.j ]]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = gufunc(a, signature=signature) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return EighResult(w, wrap(vt)) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True, hermitian=False): + """ + Singular Value Decomposition. + + When `a` is a 2D array, and ``full_matrices=False``, then it is + factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where + `u` and the Hermitian transpose of `vh` are 2D arrays with + orthonormal columns and `s` is a 1D array of `a`'s singular + values. When `a` is higher-dimensional, SVD is applied in + stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. 
+ compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.17.0 + + Returns + ------- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: + + U : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + S : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + Vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + See Also + -------- + scipy.linalg.svd : Similar function in SciPy. + scipy.linalg.svdvals : Compute singular values of a matrix. + + Notes + ----- + + .. versionchanged:: 1.8.0 + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) + + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. 
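+
+    A small sketch of the ``hermitian`` fast path (for a real symmetric
+    matrix the singular values are the absolute values of the eigenvalues,
+    returned in descending order):
+
+    >>> np.linalg.svd(np.diag([3., -2., 1.]), compute_uv=False,
+    ...               hermitian=True)
+    array([3., 2., 1.])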
+ + Examples + -------- + >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) + + Reconstruction based on full SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on reduced SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U * S, Vh)) + True + >>> smat = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on full SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh)) + True + + Reconstruction based on reduced SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U, S[..., None] * Vh)) + True + + """ + import numpy as _nx + a, wrap = _makearray(a) + + if hermitian: + # note: lapack svd returns eigenvalues with s ** 2 sorted descending, + # but eig returns s sorted ascending, so we re-order the eigenvalues + # and related arrays to have the correct order + if compute_uv: + s, u = eigh(a) + sgn = sign(s) + s = abs(s) + sidx = argsort(s)[..., ::-1] + sgn = _nx.take_along_axis(sgn, sidx, axis=-1) + s = _nx.take_along_axis(s, sidx, axis=-1) + u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) + # singular values are unsigned, move the sign into v + vt = transpose(u * sgn[..., None, :]).conjugate() + return SVDResult(wrap(u), s, wrap(vt)) + else: + s = eigvalsh(a) + s = abs(s) + return sort(s)[..., ::-1] + + _assert_stacked_2d(a) + t, result_t = _commonType(a) + + m, n = a.shape[-2:] + if compute_uv: + if full_matrices: + if m < n: + gufunc = _umath_linalg.svd_m_f + else: + gufunc = _umath_linalg.svd_n_f + else: + if m < n: + gufunc = _umath_linalg.svd_m_s + else: + gufunc = _umath_linalg.svd_n_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + with errstate(call=_raise_linalgerror_svd_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + u, s, vh = gufunc(a, signature=signature) + u = u.astype(result_t, copy=False) + s = s.astype(_realType(result_t), copy=False) + vh = vh.astype(result_t, copy=False) + return SVDResult(wrap(u), s, wrap(vh)) + else: + if m < n: + gufunc = _umath_linalg.svd_m + else: + gufunc = _umath_linalg.svd_n + + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_svd_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + s = gufunc(a, signature=signature) + s = s.astype(_realType(result_t), copy=False) + return s + + +def _svdvals_dispatcher(x): + return (x,) + + +@array_function_dispatch(_svdvals_dispatcher) +def svdvals(x, /): + """ + Returns the singular values of a matrix (or a stack of matrices) ``x``. + When x is a stack of matrices, the function will compute the singular + values for each matrix in the stack. 
+ + This function is Array API compatible. + + Calling ``np.svdvals(x)`` to get singular values is the same as + ``np.svd(x, compute_uv=False, hermitian=False)``. + + Parameters + ---------- + x : (..., M, N) array_like + Input array having shape (..., M, N) and whose last two + dimensions form matrices on which to perform singular value + decomposition. Should have a floating-point data type. + + Returns + ------- + out : ndarray + An array with shape (..., K) that contains the vector(s) + of singular values of length K, where K = min(M, N). + + See Also + -------- + scipy.linalg.svdvals : Compute singular values of a matrix. + + """ + return svd(x, compute_uv=False, hermitian=False) + + +def _cond_dispatcher(x, p=None): + return (x,) + + +@array_function_dispatch(_cond_dispatcher) +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (..., M, N) array_like + The matrix whose condition number is sought. + p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm used in the condition number computation: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the `numpy.inf` object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 # may vary + >>> (min(LA.svd(a, compute_uv=False)) * + ... min(LA.svd(LA.inv(a), compute_uv=False))) + 0.70710678118654746 # may vary + + """ + x = asarray(x) # in case we have a matrix + if _is_empty_2d(x): + raise LinAlgError("cond is not defined on empty arrays") + if p is None or p == 2 or p == -2: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. 
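+        # For these norms the condition number is computed directly as
+        # norm(x, p) * norm(inv(x), p); singular inputs yield nans here,
+        # which the block below promotes to inf.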
+ _assert_stacked_2d(x) + _assert_stacked_square(x) + t, result_t = _commonType(x) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + r = asarray(r) + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = inf + elif nan_mask: + r[()] = inf + + # Convention is to return scalars instead of 0d arrays + if r.ndim == 0: + r = r[()] + + return r + + +def _matrix_rank_dispatcher(A, tol=None, hermitian=None, *, rtol=None): + return (A,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + A : {(M,), (..., M, N)} array_like + Input vector or stack of matrices. + tol : (...) array_like, float, optional + Threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M, N) * eps``. + + .. versionchanged:: 1.14 + Broadcasted against the stack of matrices + hermitian : bool, optional + If True, `A` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.14 + rtol : (...) array_like, float, optional + Parameter for the relative tolerance component. Only ``tol`` or + ``rtol`` can be set at a time. Defaults to ``max(M, N) * eps``. + + .. versionadded:: 2.0.0 + + Returns + ------- + rank : (...) array_like + Rank of A. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `A`. By default, we identify singular values + less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency + (with the symbols defined above). This is the algorithm MATLAB uses [1]. + It also appears in *Numerical recipes* in the discussion of SVD solutions + for linear least squares [2]. + + This default threshold is designed to detect rank deficiency accounting + for the numerical errors of the SVD computation. Imagine that there + is a column in `A` that is an exact (in floating point) linear combination + of other columns in `A`. Computing the SVD on `A` will not produce + a singular value exactly equal to 0 in general: any difference of + the smallest SVD value from 0 will be caused by numerical imprecision + in the calculation of the SVD. Our threshold for small SVD values takes + this numerical imprecision into account, and the default threshold will + detect such numerical rank deficiency. The threshold may declare a matrix + `A` rank deficient even if the linear combination of some columns of `A` + is not exactly equal to another column of `A` but only numerically very + close to another column of `A`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. 
The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. However, you may have more information about + the sources of error in `A` that would make you consider other tolerance + values to detect *effective* rank deficiency. The most useful measure + of the tolerance depends on the operations you intend to use on your + matrix. For example, if your data come from uncertain measurements with + uncertainties greater than floating point epsilon, choosing a tolerance + near that uncertainty may be preferable. The tolerance may be absolute + if the uncertainties are absolute rather than relative. + + References + ---------- + .. [1] MATLAB reference documentation, "Rank" + https://www.mathworks.com/help/techdoc/ref/rank.html + .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, + "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, + page 795. + + Examples + -------- + >>> from numpy.linalg import matrix_rank + >>> matrix_rank(np.eye(4)) # Full rank matrix + 4 + >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix + >>> matrix_rank(I) + 3 + >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 + 1 + >>> matrix_rank(np.zeros((4,))) + 0 + """ + if rtol is not None and tol is not None: + raise ValueError("`tol` and `rtol` can't be both set.") + + A = asarray(A) + if A.ndim < 2: + return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) + + if tol is None: + if rtol is None: + rtol = max(A.shape[-2:]) * finfo(S.dtype).eps + else: + rtol = asarray(rtol)[..., newaxis] + tol = S.max(axis=-1, keepdims=True) * rtol + else: + tol = asarray(tol)[..., newaxis] + + return count_nonzero(S > tol, axis=-1) + + +# Generalized inverse + +def _pinv_dispatcher(a, rcond=None, hermitian=None, *, rtol=None): + return (a,) + + +@array_function_dispatch(_pinv_dispatcher) +def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate the generalized inverse of a matrix using its + singular-value decomposition (SVD) and including all + *large* singular values. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float, optional + Cutoff for small singular values. + Singular values less than or equal to + ``rcond * largest_singular_value`` are set to zero. + Broadcasts against the stack of matrices. Default: ``1e-15``. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.17.0 + rtol : (...) array_like of float, optional + Same as `rcond`, but it's an Array API compatible parameter name. + Only `rcond` or `rtol` can be set at a time. If none of them are + provided then NumPy's ``1e-15`` default is used. If ``rtol=None`` + is passed then the API standard default is used. + + .. versionadded:: 2.0.0 + + Returns + ------- + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. + + Raises + ------ + LinAlgError + If the SVD computation does not converge. + + See Also + -------- + scipy.linalg.pinv : Similar function in SciPy. 
+ scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a + Hermitian matrix. + + Notes + ----- + The pseudo-inverse of a matrix A, denoted :math:`A^+`, is + defined as: "the matrix that 'solves' [the least-squares problem] + :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then + :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. + + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142. + + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> a = np.random.randn(9, 6) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + if rcond is None: + if rtol is _NoValue: + rcond = 1e-15 + elif rtol is None: + rcond = max(a.shape[-2:]) * finfo(a.dtype).eps + else: + rcond = rtol + elif rtol is not _NoValue: + raise ValueError("`rtol` and `rcond` can't be both set.") + else: + # NOTE: Deprecate `rcond` in a few versions. + pass + + rcond = asarray(rcond) + if _is_empty_2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + A namedtuple with the following attributes: + + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logabsdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logabsdet` + will be -inf. In all cases, the determinant is equal to + ``sign * np.exp(logabsdet)``. + + See Also + -------- + det + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + .. versionadded:: 1.6.0 + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. 
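+
+    The same robustness argument applies to overflow; as a small sketch
+    mirroring the underflow example below:
+
+    >>> np.linalg.det(np.eye(500) * 10.)
+    inf
+    >>> np.linalg.slogdet(np.eye(500) * 10.)
+    (1, 1151.2925464970228) # may vary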
+ + + Examples + -------- + The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> (sign, logabsdet) = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (-1, 0.69314718055994529) # may vary + >>> sign * np.exp(logabsdet) + -2.0 + + Computing log-determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> sign, logabsdet = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) + >>> sign * np.exp(logabsdet) + array([-2., -3., -8.]) + + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + """ + a = asarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + real_t = _realType(result_t) + signature = 'D->Dd' if isComplexType(t) else 'd->dd' + sign, logdet = _umath_linalg.slogdet(a, signature=signature) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) + return SlogdetResult(sign, logdet) + + +@array_function_dispatch(_unary_dispatcher) +def det(a): + """ + Compute the determinant of an array. + + Parameters + ---------- + a : (..., M, M) array_like + Input array to compute determinants for. + + Returns + ------- + det : (...) array_like + Determinant of `a`. + + See Also + -------- + slogdet : Another way to represent the determinant, more suitable + for large matrices where underflow/overflow may occur. + scipy.linalg.det : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 # may vary + + Computing determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> np.linalg.det(a) + array([-2., -3., -8.]) + + """ + a = asarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = _umath_linalg.det(a, signature=signature) + r = r.astype(result_t, copy=False) + return r + + +# Linear Least Squares + +def _lstsq_dispatcher(a, b, rcond=None): + return (a, b) + + +@array_function_dispatch(_lstsq_dispatcher) +def lstsq(a, b, rcond=None): + r""" + Return the least-squares solution to a linear matrix equation. + + Computes the vector `x` that approximately solves the equation + ``a @ x = b``. The equation may be under-, well-, or over-determined + (i.e., the number of linearly independent rows of `a` can be less than, + equal to, or greater than its number of linearly independent columns). + If `a` is square and of full rank, then `x` (but for round-off error) + is the "exact" solution of the equation. Else, `x` minimizes the + Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing + solutions, the one with the smallest 2-norm :math:`||x||` is returned. + + Parameters + ---------- + a : (M, N) array_like + "Coefficient" matrix. + b : {(M,), (M, K)} array_like + Ordinate or "dependent variable" values. 
If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + The default uses the machine precision times ``max(M, N)``. Passing + ``-1`` will use machine precision. + + .. versionchanged:: 2.0 + Previously, the default was ``-1``, but a warning was given that + this would change. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of squared residuals: Squared Euclidean 2-norm for each column in + ``b - a @ x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + See Also + -------- + scipy.linalg.lstsq : Similar function in SciPy. + + Notes + ----- + If `b` is a matrix, then all array results are returned as matrices. + + Examples + -------- + Fit a line, ``y = mx + c``, through some noisy data-points: + + >>> x = np.array([0, 1, 2, 3]) + >>> y = np.array([-1, 0.2, 0.9, 2.1]) + + By examining the coefficients, we see that the line should have a + gradient of roughly 1 and cut the y-axis at, more or less, -1. + + We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` + and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: + + >>> A = np.vstack([x, np.ones(len(x))]).T + >>> A + array([[ 0., 1.], + [ 1., 1.], + [ 2., 1.], + [ 3., 1.]]) + + >>> m, c = np.linalg.lstsq(A, y)[0] + >>> m, c + (1.0 -0.95) # may vary + + Plot the data along with the fitted line: + + >>> import matplotlib.pyplot as plt + >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10) + >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line') + >>> _ = plt.legend() + >>> plt.show() + + """ + a, _ = _makearray(a) + b, wrap = _makearray(b) + is_1d = b.ndim == 1 + if is_1d: + b = b[:, newaxis] + _assert_2d(a, b) + m, n = a.shape[-2:] + m2, n_rhs = b.shape[-2:] + if m != m2: + raise LinAlgError('Incompatible dimensions') + + t, result_t = _commonType(a, b) + result_real_t = _realType(result_t) + + if rcond is None: + rcond = finfo(t).eps * max(n, m) + + if m <= n: + gufunc = _umath_linalg.lstsq_m + else: + gufunc = _umath_linalg.lstsq_n + + signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' + if n_rhs == 0: + # lapack can't handle n_rhs = 0 - so allocate + # the array one larger in that axis + b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype) + + with errstate(call=_raise_linalgerror_lstsq, invalid='call', + over='ignore', divide='ignore', under='ignore'): + x, resids, rank, s = gufunc(a, b, rcond, signature=signature) + if m == 0: + x[...] = 0 + if n_rhs == 0: + # remove the item we added + x = x[..., :n_rhs] + resids = resids[..., :n_rhs] + + # remove the axis we added + if is_1d: + x = x.squeeze(axis=-1) + # we probably should squeeze resids too, but we can't + # without breaking compatibility. 
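+
+    # (residuals are well-defined only when `a` has full rank and the
+    # system is overdetermined, i.e. rank == n and m > n; the branch
+    # below empties them otherwise)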
+ + # as documented + if rank != n or m <= n: + resids = array([], result_real_t) + + # coerce output arrays + s = s.astype(result_real_t, copy=False) + resids = resids.astype(result_real_t, copy=False) + # Copying lets the memory in r_parts be freed + x = x.astype(result_t, copy=True) + return wrap(x), wrap(resids), rank, s + + +def _multi_svd_norm(x, row_axis, col_axis, op): + """Compute a function of the singular values of the 2-D matrices in `x`. + + This is a private utility function used by `numpy.linalg.norm()`. + + Parameters + ---------- + x : ndarray + row_axis, col_axis : int + The axes of `x` that hold the 2-D matrices. + op : callable + This should be either numpy.amin or `numpy.amax` or `numpy.sum`. + + Returns + ------- + result : float or ndarray + If `x` is 2-D, the return values is a float. + Otherwise, it is an array with ``x.ndim - 2`` dimensions. + The return values are either the minimum or maximum or sum of the + singular values of the matrices, depending on whether `op` + is `numpy.amin` or `numpy.amax` or `numpy.sum`. + + """ + y = moveaxis(x, (row_axis, col_axis), (-2, -1)) + result = op(svd(y, compute_uv=False), axis=-1) + return result + + +def _norm_dispatcher(x, ord=None, axis=None, keepdims=None): + return (x,) + + +@array_function_dispatch(_norm_dispatcher) +def norm(x, ord=None, axis=None, keepdims=False): + """ + Matrix or vector norm. + + This function is able to return one of eight different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + Parameters + ---------- + x : array_like + Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord` + is None. If both `axis` and `ord` are None, the 2-norm of + ``x.ravel`` will be returned. + ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. The default is None. + axis : {None, int, 2-tuple of ints}, optional. + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default + is None. + + .. versionadded:: 1.8.0 + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + .. versionadded:: 1.10.0 + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + See Also + -------- + scipy.linalg.norm : Similar function in SciPy. + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. 
value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + 0.0 + >>> LA.norm(b, -2) + 1.8570331885190563e-016 # may vary + >>> LA.norm(a, 3) + 5.8480354764257312 # may vary + >>> LA.norm(a, -3) + 0.0 + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. ]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ( + (ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1) + ): + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + x_real = x.real + x_imag = x.imag + sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag) + else: + sqnorm = x.dot(x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim*[1]) + return ret + + # Normalize the `axis` argument to a tuple. 
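+    # e.g. axis=1 becomes (1,) and axis=None expands to all axes of x, so
+    # the branches below only have to distinguish 1-tuples (vector norms)
+    # from 2-tuples (matrix norms).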
+ nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception as e: + raise TypeError( + "'axis' must be None, an integer or a tuple of integers" + ) from e + axis = (axis,) + + if len(axis) == 1: + if ord == inf: + return abs(x).max(axis=axis, keepdims=keepdims) + elif ord == -inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return ( + (x != 0) + .astype(x.real.dtype) + .sum(axis=axis, keepdims=keepdims) + ) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + # None of the str-type keywords for ord ('fro', 'nuc') + # are valid for vectors + elif isinstance(ord, str): + raise ValueError(f"Invalid norm order '{ord}' for vectors") + else: + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= reciprocal(ord, dtype=ret.dtype) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + elif ord == inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays, *, out=None): + yield from arrays + yield out + + +@array_function_dispatch(_multidot_dispatcher) +def multi_dot(arrays, *, out=None): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. + + `multi_dot` chains `numpy.dot` and uses optimal parenthesization + of the matrices [1]_ [2]_. Depending on the shapes of the matrices, + this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + If the last argument is 1-D it is treated as a column vector. + The other arguments must be 2-D. + + Think of `multi_dot` as:: + + def multi_dot(arrays): return functools.reduce(np.dot, arrays) + + + Parameters + ---------- + arrays : sequence of array_like + If the first argument is 1-D it is treated as row vector. + If the last argument is 1-D it is treated as column vector. + The other arguments must be 2-D. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. 
In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a, b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + .. versionadded:: 1.19.0 + + Returns + ------- + output : ndarray + Returns the dot product of the supplied arrays. + + See Also + -------- + numpy.dot : dot multiplication with two arguments. + + References + ---------- + + .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 + .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication + + Examples + -------- + `multi_dot` allows you to write:: + + >>> from numpy.linalg import multi_dot + >>> # Prepare some data + >>> A = np.random.random((10000, 100)) + >>> B = np.random.random((100, 1000)) + >>> C = np.random.random((1000, 5)) + >>> D = np.random.random((5, 333)) + >>> # the actual dot multiplication + >>> _ = multi_dot([A, B, C, D]) + + instead of:: + + >>> _ = np.dot(np.dot(np.dot(A, B), C), D) + >>> # or + >>> _ = A.dot(B).dot(C).dot(D) + + Notes + ----- + The cost for a matrix multiplication can be calculated with the + following function:: + + def cost(A, B): + return A.shape[0] * A.shape[1] * B.shape[1] + + Assume we have three matrices + :math:`A_{10x100}, B_{100x5}, C_{5x50}`. + + The costs for the two different parenthesizations are as follows:: + + cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 + cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 + + """ + n = len(arrays) + # optimization only makes sense for len(arrays) > 2 + if n < 2: + raise ValueError("Expecting at least two arrays.") + elif n == 2: + return dot(arrays[0], arrays[1], out=out) + + arrays = [asanyarray(a) for a in arrays] + + # save original ndim to reshape the result array into the proper form later + ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim + # Explicitly convert vectors to 2D arrays to keep the logic of the internal + # _multi_dot_* functions as simple as possible. + if arrays[0].ndim == 1: + arrays[0] = atleast_2d(arrays[0]) + if arrays[-1].ndim == 1: + arrays[-1] = atleast_2d(arrays[-1]).T + _assert_2d(*arrays) + + # _multi_dot_three is much faster than _multi_dot_matrix_chain_order + if n == 3: + result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out) + else: + order = _multi_dot_matrix_chain_order(arrays) + result = _multi_dot(arrays, order, 0, n - 1, out=out) + + # return proper shape + if ndim_first == 1 and ndim_last == 1: + return result[0, 0] # scalar + elif ndim_first == 1 or ndim_last == 1: + return result.ravel() # 1-D + else: + return result + + +def _multi_dot_three(A, B, C, out=None): + """ + Find the best order for three arrays and do the multiplication. + + For three arguments `_multi_dot_three` is approximately 15 times faster + than `_multi_dot_matrix_chain_order` + + """ + a0, a1b0 = A.shape + b1c0, c1 = C.shape + # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 + cost1 = a0 * b1c0 * (a1b0 + c1) + # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 + cost2 = a1b0 * c1 * (a0 + b1c0) + + if cost1 < cost2: + return dot(dot(A, B), C, out=out) + else: + return dot(A, dot(B, C), out=out) + + +def _multi_dot_matrix_chain_order(arrays, return_costs=False): + """ + Return a np.array that encodes the optimal order of mutiplications. + + The optimal order array is then used by `_multi_dot()` to do the + multiplication. 
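+    # (Editorial sketch, not upstream code: a doctest-style check of the
+    #  behaviour described above, using the docstring's A_{10x100},
+    #  B_{100x5}, C_{5x50} shapes. `_multi_dot_matrix_chain_order` is a
+    #  private helper (NumPy 2.x location), so this is illustrative only.
+    #  cost((AB)C) = 10*5*(100+50) = 7500 beats
+    #  cost(A(BC)) = 100*50*(10+5) = 75000, so the split index is k=1:
+    #
+    #  >>> import functools
+    #  >>> import numpy as np
+    #  >>> from numpy.linalg._linalg import _multi_dot_matrix_chain_order
+    #  >>> A, B, C = np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))
+    #  >>> np.allclose(np.linalg.multi_dot([A, B, C]),
+    #  ...             functools.reduce(np.dot, [A, B, C]))
+    #  True
+    #  >>> s, m = _multi_dot_matrix_chain_order([A, B, C], return_costs=True)
+    #  >>> int(s[0, 2]), float(m[0, 2])  # k=1 means split as (A B)(C)
+    #  (1, 7500.0)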
+ + Also return the cost matrix if `return_costs` is `True` + + The implementation CLOSELY follows Cormen, "Introduction to Algorithms", + Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. + + cost[i, j] = min([ + cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) + for k in range(i, j)]) + + """ + n = len(arrays) + # p stores the dimensions of the matrices + # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] + p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] + # m is a matrix of costs of the subproblems + # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} + m = zeros((n, n), dtype=double) + # s is the actual ordering + # s[i, j] is the value of k at which we split the product A_i..A_j + s = empty((n, n), dtype=intp) + + for l in range(1, n): + for i in range(n - l): + j = i + l + m[i, j] = inf + for k in range(i, j): + q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] + if q < m[i, j]: + m[i, j] = q + s[i, j] = k # Note that Cormen uses 1-based index + + return (s, m) if return_costs else s + + +def _multi_dot(arrays, order, i, j, out=None): + """Actually do the multiplication with the given order.""" + if i == j: + # the initial call with non-None out should never get here + assert out is None + + return arrays[i] + else: + return dot(_multi_dot(arrays, order, i, order[i, j]), + _multi_dot(arrays, order, order[i, j] + 1, j), + out=out) + + +# diagonal + +def _diagonal_dispatcher(x, /, *, offset=None): + return (x,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(x, /, *, offset=0): + """ + Returns specified diagonals of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.diagonal`, the matrix is assumed + to be defined by the last two dimensions. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. + * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + Returns + ------- + out : (...,min(N,M)) ndarray + An array containing the diagonals and whose shape is determined by + removing the last two dimensions and appending a dimension equal to + the size of the resulting diagonals. The returned array must have + the same data type as ``x``. + + See Also + -------- + numpy.diagonal + + """ + return _core_diagonal(x, offset, axis1=-2, axis2=-1) + + +# trace + +def _trace_dispatcher(x, /, *, offset=None, dtype=None): + return (x,) + + +@array_function_dispatch(_trace_dispatcher) +def trace(x, /, *, offset=0, dtype=None): + """ + Returns the sum along the specified diagonals of a matrix + (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.trace`. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. + * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + dtype : dtype, optional + Data type of the returned array. 
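+    # (Editorial sketch, not upstream code: the `offset` convention shared
+    #  by `diagonal` above and `trace` here, checked on NumPy >= 2.0 where
+    #  these Array API wrappers exist:
+    #
+    #  >>> import numpy as np
+    #  >>> x = np.arange(9).reshape(3, 3)
+    #  >>> np.linalg.diagonal(x)
+    #  array([0, 4, 8])
+    #  >>> np.linalg.diagonal(x, offset=1)
+    #  array([1, 5])
+    #  >>> int(np.linalg.trace(x, offset=-1))  # 3 + 7
+    #  10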
+ + Returns + ------- + out : ndarray + An array containing the traces and whose shape is determined by + removing the last two dimensions and storing the traces in the last + array dimension. For example, if x has rank k and shape: + (I, J, K, ..., L, M, N), then an output array has rank k-2 and shape: + (I, J, K, ..., L) where:: + + out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :]) + + The returned array must have a data type as described by the dtype + parameter above. + + See Also + -------- + numpy.trace + + """ + return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) + + +# cross + +def _cross_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2,) + + +@array_function_dispatch(_cross_dispatcher) +def cross(x1, x2, /, *, axis=-1): + """ + Returns the cross product of 3-element vectors. + + If ``x1`` and/or ``x2`` are multi-dimensional arrays, then + the cross-product of each pair of corresponding 3-element vectors + is independently computed. + + This function is Array API compatible, contrary to + :func:`numpy.cross`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. Must be compatible with ``x1`` for all + non-compute axes. The size of the axis over which to compute + the cross-product must be the same size as the respective axis + in ``x1``. + axis : int, optional + The axis (dimension) of ``x1`` and ``x2`` containing the vectors for + which to compute the cross-product. Default: ``-1``. + + Returns + ------- + out : ndarray + An array containing the cross products. + + See Also + -------- + numpy.cross + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + + if x1.shape[axis] != 3 or x2.shape[axis] != 3: + raise ValueError( + "Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {x1.shape[axis]} and {x2.shape[axis]} " + "dimensional instead." + ) + + return _core_cross(x1, x2, axis=axis) + + +# matmul + +def _matmul_dispatcher(x1, x2, /): + return (x1, x2) + + +@array_function_dispatch(_matmul_dispatcher) +def matmul(x1, x2, /): + """ + Computes the matrix product. + + This function is Array API compatible, contrary to + :func:`numpy.matmul`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. + + Returns + ------- + out : ndarray + The matrix product of the inputs. + This is a scalar only when both ``x1``, ``x2`` are 1-d vectors. + + Raises + ------ + ValueError + If the last dimension of ``x1`` is not the same size as + the second-to-last dimension of ``x2``. + + If a scalar value is passed in. + + See Also + -------- + numpy.matmul + + """ + return _core_matmul(x1, x2) + + +# tensordot + +def _tensordot_dispatcher(x1, x2, /, *, axes=None): + return (x1, x2) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(x1, x2, /, *, axes=2): + return _core_tensordot(x1, x2, axes=axes) + + +tensordot.__doc__ = _core_tensordot.__doc__ + + +# matrix_transpose + +def _matrix_transpose_dispatcher(x): + return (x,) + +@array_function_dispatch(_matrix_transpose_dispatcher) +def matrix_transpose(x, /): + return _core_matrix_transpose(x) + + +matrix_transpose.__doc__ = _core_matrix_transpose.__doc__ + + +# matrix_norm + +def _matrix_norm_dispatcher(x, /, *, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_matrix_norm_dispatcher) +def matrix_norm(x, /, *, keepdims=False, ord="fro"): + """ + Computes the matrix norm of a matrix (or a stack of matrices) ``x``. 
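+    # (Editorial note, not upstream code: as the body below shows,
+    #  `matrix_norm` simply forwards to `norm` with axis=(-2, -1); a quick
+    #  NumPy >= 2.0 check:
+    #
+    #  >>> import numpy as np
+    #  >>> x = np.arange(9, dtype=float).reshape(3, 3)
+    #  >>> bool(np.isclose(np.linalg.matrix_norm(x), np.linalg.norm(x, 'fro')))
+    #  True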
+ + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array having shape (..., M, N) and whose two innermost + dimensions form ``MxN`` matrices. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + """ + x = asanyarray(x) + return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + + +# vector_norm + +def _vector_norm_dispatcher(x, /, *, axis=None, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_vector_norm_dispatcher) +def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): + """ + Computes the vector norm of a vector (or batch of vectors) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array. + axis : {None, int, 2-tuple of ints}, optional + If an integer, ``axis`` specifies the axis (dimension) along which + to compute vector norms. If an n-tuple, ``axis`` specifies the axes + (dimensions) along which to compute batched vector norms. If ``None``, + the vector norm must be computed over all array values (i.e., + equivalent to computing the vector norm of a flattened array). + Default: ``None``. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + """ + x = asanyarray(x) + shape = list(x.shape) + if axis is None: + # Note: np.linalg.norm() doesn't handle 0-D arrays + x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # np.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + x = _core_transpose(x, newshape).reshape( + ( + prod([x.shape[i] for i in axis], dtype=int), + *[x.shape[i] for i in rest] + ) + ) + _axis = 0 + else: + _axis = axis + + res = norm(x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. + _axis = normalize_axis_tuple( + range(len(shape)) if axis is None else axis, len(shape) + ) + for i in _axis: + shape[i] = 1 + res = res.reshape(tuple(shape)) + + return res + + +# vecdot + +def _vecdot_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2) + +@array_function_dispatch(_vecdot_dispatcher) +def vecdot(x1, x2, /, *, axis=-1): + """ + Computes the vector dot product. + + This function is restricted to arguments compatible with the Array API, + contrary to :func:`numpy.vecdot`. + + Let :math:`\\mathbf{a}` be a vector in ``x1`` and :math:`\\mathbf{b}` be + a corresponding vector in ``x2``. The dot product is defined as: + + .. math:: + \\mathbf{a} \\cdot \\mathbf{b} = \\sum_{i=0}^{n-1} \\overline{a_i}b_i + + over the dimension specified by ``axis`` and where :math:`\\overline{a_i}` + denotes the complex conjugate if :math:`a_i` is complex and the identity + otherwise. 
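+    # (Editorial sketch, not upstream code: the formula above conjugates the
+    #  vector taken from ``x1``, and `vector_norm` defined earlier flattens
+    #  every axis when ``axis=None``; both checks assume NumPy >= 2.0:
+    #
+    #  >>> import numpy as np
+    #  >>> complex(np.linalg.vecdot(np.array([1j, 2.0]), np.array([1j, 2.0])))
+    #  (5+0j)
+    #  >>> float(np.linalg.vector_norm(np.ones((2, 3))))  # sqrt(6)
+    #  2.449489742783178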
+ + Parameters + ---------- + x1 : array_like + First input array. + x2 : array_like + Second input array. + axis : int, optional + Axis over which to compute the dot product. Default: ``-1``. + + Returns + ------- + output : ndarray + The vector dot product of the input. + + See Also + -------- + numpy.vecdot + + """ + return _core_vecdot(x1, x2, axis=axis) diff --git a/phivenv/Lib/site-packages/numpy/linalg/_linalg.pyi b/phivenv/Lib/site-packages/numpy/linalg/_linalg.pyi new file mode 100644 index 0000000000000000000000000000000000000000..969adb11abab7dd2880f2b3885d3d392fe5ce6a8 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/linalg/_linalg.pyi @@ -0,0 +1,426 @@ +from collections.abc import Iterable +from typing import ( + Literal as L, + overload, + TypeVar, + Any, + SupportsIndex, + SupportsInt, + NamedTuple, + Generic, +) + +import numpy as np +from numpy import ( + generic, + floating, + complexfloating, + signedinteger, + unsignedinteger, + timedelta64, + object_, + int32, + float64, + complex128, +) + +from numpy.linalg import LinAlgError as LinAlgError + +from numpy._typing import ( + NDArray, + ArrayLike, + _ArrayLikeUnknown, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _ArrayLikeUInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, + DTypeLike, +) + +_T = TypeVar("_T") +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_SCT = TypeVar("_SCT", bound=generic, covariant=True) +_SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) + +_2Tuple = tuple[_T, _T] +_ModeKind = L["reduced", "complete", "r", "raw"] + +__all__: list[str] + +class EigResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class EighResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class QRResult(NamedTuple): + Q: NDArray[Any] + R: NDArray[Any] + +class SlogdetResult(NamedTuple): + # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and + # a `(x.ndim - 2)`` dimensionl arrays otherwise + sign: Any + logabsdet: Any + +class SVDResult(NamedTuple): + U: NDArray[Any] + S: NDArray[Any] + Vh: NDArray[Any] + +@overload +def tensorsolve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: None | Iterable[int] =..., +) -> NDArray[float64]: ... +@overload +def tensorsolve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: None | Iterable[int] =..., +) -> NDArray[floating[Any]]: ... +@overload +def tensorsolve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: None | Iterable[int] =..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def solve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, +) -> NDArray[float64]: ... +@overload +def solve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def solve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def tensorinv( + a: _ArrayLikeInt_co, + ind: int = ..., +) -> NDArray[float64]: ... +@overload +def tensorinv( + a: _ArrayLikeFloat_co, + ind: int = ..., +) -> NDArray[floating[Any]]: ... +@overload +def tensorinv( + a: _ArrayLikeComplex_co, + ind: int = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +# TODO: The supported input and output dtypes are dependent on the value of `n`. 
+# For example: `n < 0` always casts integer types to float64 +def matrix_power( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + n: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def outer(x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown) -> NDArray[Any]: ... +@overload +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ... +@overload +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... +@overload +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... +@overload +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def outer( + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def outer( + x1: _ArrayLikeTD64_co, + x2: _ArrayLikeTD64_co, + out: None = ..., +) -> NDArray[timedelta64]: ... +@overload +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... +@overload +def outer( + x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, +) -> _ArrayType: ... + +@overload +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... + +@overload +def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... +@overload +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ... +@overload +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +@overload +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ... + +@overload +def eig(a: _ArrayLikeInt_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeFloat_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeComplex_co) -> EigResult: ... + +@overload +def eigh( + a: _ArrayLikeInt_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeFloat_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeComplex_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... + +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeFloat_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[floating[Any]]: ... 
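+# (Editorial note, not part of the stub file: the overload pattern above
+#  encodes runtime dtype promotion -- integer input to `solve`, `inv`, `svd`
+#  and friends comes back as float64 -- while the `norm` stubs further below
+#  distinguish ``axis=None`` (a floating scalar) from an explicit axis:
+#
+#  >>> import numpy as np
+#  >>> np.linalg.solve(np.array([[2, 0], [0, 2]]), np.array([2, 4])).dtype
+#  dtype('float64')
+#  >>> np.linalg.norm(np.ones((2, 2)), axis=0)
+#  array([1.41421356, 1.41421356])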
+ +def svdvals( + x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co +) -> NDArray[floating[Any]]: ... + +# TODO: Returns a scalar for 2D arrays and +# a `(x.ndim - 2)`` dimensionl array otherwise +def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ... + +# TODO: Returns `int` for <2D arrays and `intp` otherwise +def matrix_rank( + A: _ArrayLikeComplex_co, + tol: None | _ArrayLikeFloat_co = ..., + hermitian: bool = ..., + *, + rtol: None | _ArrayLikeFloat_co = ..., +) -> Any: ... + +@overload +def pinv( + a: _ArrayLikeInt_co, + rcond: _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def pinv( + a: _ArrayLikeFloat_co, + rcond: _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> NDArray[floating[Any]]: ... +@overload +def pinv( + a: _ArrayLikeComplex_co, + rcond: _ArrayLikeFloat_co = ..., + hermitian: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +# TODO: Returns a 2-tuple of scalars for 2D arrays and +# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... + +# TODO: Returns a 2-tuple of scalars for 2D arrays and +# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +def det(a: _ArrayLikeComplex_co) -> Any: ... + +@overload +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[ + NDArray[float64], + NDArray[float64], + int32, + NDArray[float64], +]: ... +@overload +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[ + NDArray[floating[Any]], + NDArray[floating[Any]], + int32, + NDArray[floating[Any]], +]: ... +@overload +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[ + NDArray[complexfloating[Any, Any]], + NDArray[floating[Any]], + int32, + NDArray[floating[Any]], +]: ... + +@overload +def norm( + x: ArrayLike, + ord: None | float | L["fro", "nuc"] = ..., + axis: None = ..., + keepdims: bool = ..., +) -> floating[Any]: ... +@overload +def norm( + x: ArrayLike, + ord: None | float | L["fro", "nuc"] = ..., + axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., + keepdims: bool = ..., +) -> Any: ... + +@overload +def matrix_norm( + x: ArrayLike, + ord: None | float | L["fro", "nuc"] = ..., + keepdims: bool = ..., +) -> floating[Any]: ... +@overload +def matrix_norm( + x: ArrayLike, + ord: None | float | L["fro", "nuc"] = ..., + keepdims: bool = ..., +) -> Any: ... + +@overload +def vector_norm( + x: ArrayLike, + axis: None = ..., + ord: None | float = ..., + keepdims: bool = ..., +) -> floating[Any]: ... +@overload +def vector_norm( + x: ArrayLike, + axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., + ord: None | float = ..., + keepdims: bool = ..., +) -> Any: ... + +# TODO: Returns a scalar or array +def multi_dot( + arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], + *, + out: None | NDArray[Any] = ..., +) -> Any: ... + +def diagonal( + x: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., +) -> NDArray[Any]: ... + +def trace( + x: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + dtype: DTypeLike = ..., +) -> Any: ... + +@overload +def cross( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axis: int = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def cross( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axis: int = ..., +) -> NDArray[signedinteger[Any]]: ... 
+@overload +def cross( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axis: int = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cross( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axis: int = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def matmul( + x1: _ArrayLikeInt_co, + x2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def matmul( + x1: _ArrayLikeUInt_co, + x2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def matmul( + x1: _ArrayLikeFloat_co, + x2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def matmul( + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... diff --git a/phivenv/Lib/site-packages/numpy/linalg/_umath_linalg.cp39-win_amd64.lib b/phivenv/Lib/site-packages/numpy/linalg/_umath_linalg.cp39-win_amd64.lib new file mode 100644 index 0000000000000000000000000000000000000000..5bc85a37d376d8e9f3a1f6d30ff46cacb5f8a049 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/_umath_linalg.cp39-win_amd64.lib differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.lib b/phivenv/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.lib new file mode 100644 index 0000000000000000000000000000000000000000..2422213824bc45897204efa0b35c33c5b433b828 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.lib differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.pyd b/phivenv/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..df3e86dabc9bf24c4a585b6165eb05a61d2aac00 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.pyd differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/linalg.py b/phivenv/Lib/site-packages/numpy/linalg/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..5885499da577515f62f426f23f1cf2a9140ac773 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/linalg/linalg.py @@ -0,0 +1,16 @@ +def __getattr__(attr_name): + import warnings + from numpy.linalg import _linalg + ret = getattr(_linalg, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.linalg.linalg' has no attribute {attr_name}") + warnings.warn( + "The numpy.linalg.linalg has been made private and renamed to " + "numpy.linalg._linalg. All public functions exported by it are " + f"available from numpy.linalg. 
Please use numpy.linalg.{attr_name} " + "instead.", + DeprecationWarning, + stacklevel=3 + ) + return ret diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/__init__.py b/phivenv/Lib/site-packages/numpy/linalg/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e485ef565ec9693dbd40f46c5abee670bbca1af Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..278597acb9c18e3c8dc0c215d2e8d916339476e6 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73def0ec54b8dd1d7c1d469662d5525d99e75b91 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7449f14fb810bd4bdd7890afb3f819e0913d3195 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/test_deprecations.py b/phivenv/Lib/site-packages/numpy/linalg/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..3dad16f01fdb78b5f703b2d2591746dbcba99aaf --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/linalg/tests/test_deprecations.py @@ -0,0 +1,20 @@ +"""Test deprecation and future warnings. + +""" +import numpy as np +from numpy.testing import assert_warns + + +def test_qr_mode_full_future_warning(): + """Check mode='full' FutureWarning. + + In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were + deprecated. The release date will probably be sometime in the summer + of 2013. 
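+    # (Editorial sketch, not upstream code: the numpy.linalg.linalg shim in
+    #  the previous file of this diff resolves attributes lazily and warns;
+    #  with the DeprecationWarning silenced, the round-trip hands back the
+    #  public object:
+    #
+    #  >>> import warnings
+    #  >>> import numpy as np
+    #  >>> with warnings.catch_warnings():
+    #  ...     warnings.simplefilter("ignore", DeprecationWarning)
+    #  ...     from numpy.linalg.linalg import norm
+    #  >>> norm is np.linalg.norm
+    #  True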
+ + """ + a = np.eye(2) + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/test_linalg.py b/phivenv/Lib/site-packages/numpy/linalg/tests/test_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..7b05500c1b1a586ff1ce0c3c2eaa0b5bae684cf6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/linalg/tests/test_linalg.py @@ -0,0 +1,2383 @@ +""" Test functions for linalg module + +""" +import os +import sys +import itertools +import traceback +import textwrap +import subprocess +import pytest + +import numpy as np +from numpy import array, single, double, csingle, cdouble, dot, identity, matmul +from numpy._core import swapaxes +from numpy.exceptions import AxisError +from numpy import multiply, atleast_2d, inf, asarray +from numpy import linalg +from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError +from numpy.linalg._linalg import _multi_dot_matrix_chain_order +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_allclose, suppress_warnings, + assert_raises_regex, HAS_LAPACK64, IS_WASM + ) +try: + import numpy.linalg.lapack_lite +except ImportError: + # May be broken when numpy was built without BLAS/LAPACK present + # If so, ensure we don't break the whole test suite - the `lapack_lite` + # submodule should be removed, it's only used in two tests in this file. + pass + + +def consistent_subclass(out, in_): + # For ndarray subclass input, our output should have the same subclass + # (non-ndarray input gets converted to ndarray). 
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray) + else np.ndarray) + + + old_assert_almost_equal = assert_almost_equal + + + def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw): + if asarray(a).dtype.type in (single, csingle): + decimal = single_decimal + else: + decimal = double_decimal + old_assert_almost_equal(a, b, decimal=decimal, **kw) + + + def get_real_dtype(dtype): + return {single: single, double: double, + csingle: single, cdouble: double}[dtype] + + + def get_complex_dtype(dtype): + return {single: csingle, double: cdouble, + csingle: csingle, cdouble: cdouble}[dtype] + + + def get_rtol(dtype): + # Choose a safe rtol + if dtype in (single, csingle): + return 1e-5 + else: + return 1e-11 + + + # used to categorize tests + all_tags = { + 'square', 'nonsquare', 'hermitian', # mutually exclusive + 'generalized', 'size-0', 'strided' # optional additions + } + + + class LinalgCase: + def __init__(self, name, a, b, tags=set()): + """ + A bundle of arguments to be passed to a test case, with an identifying + name, the operands a and b, and a set of tags to filter the tests + """ + assert_(isinstance(name, str)) + self.name = name + self.a = a + self.b = b + self.tags = frozenset(tags) # prevent shared tags + + def check(self, do): + """ + Run the function `do` on this test case, expanding arguments + """ + do(self.a, self.b, tags=self.tags) + + def __repr__(self): + return f'<LinalgCase: {self.name}>' + + + def apply_tag(tag, cases): + """ + Add the given tag (a string) to each of the cases (a list of LinalgCase + objects) + """ + assert tag in all_tags, "Invalid tag" + for case in cases: + case.tags = case.tags | {tag} + return cases + + + # + # Base test cases + # + + np.random.seed(1234) + + CASES = [] + + # square test cases + CASES += apply_tag('square', [ + LinalgCase("single", + array([[1., 2.], [3., 4.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("double", + array([[1., 2.], [3., 4.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_2", + array([[1., 2.], [3., 4.]], dtype=double), + array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), + LinalgCase("csingle", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("cdouble", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_2", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + LinalgCase("0x0", + np.empty((0, 0), dtype=double), + np.empty((0,), dtype=double), + tags={'size-0'}), + LinalgCase("8x8", + np.random.rand(8, 8), + np.random.rand(8)), + LinalgCase("1x1", + np.random.rand(1, 1), + np.random.rand(1)), + LinalgCase("nonarray", + [[1, 2], [3, 4]], + [2, 1]), +]) + + # non-square test-cases + CASES += apply_tag('nonsquare', [ + LinalgCase("single_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("single_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), + array([2., 1., 3.], dtype=single)), + LinalgCase("double_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), + array([2., 1., 3.], dtype=double)), + LinalgCase("csingle_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + array([2. + 1j, 1.
+ 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = case.b + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = np.array([case.b] * 2 * 3 * a.shape[-1])\ + .reshape((3, 2) + case.a.shape[-2:]) + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple([slice(None, None, repeat) for repeat in repeats]) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + 
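+        # (editorial note: the 0xdeadbeef fill below poisons the freshly
+        #  allocated buffer, so any region the strided view fails to
+        #  overwrite shows up as recognisable garbage rather than zeros)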
xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] = x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase: + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception as e: + msg = f'In test case: {case!r}\n\n' + msg += traceback.format_exc() + raise AssertionError(msg) from e + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def identity_like_generalized(a): + a = asarray(a) + if 
a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + if np.array(b).ndim == 1: + # When a is (..., M, M) and b is (M,), it is the same as when b is + # (M, 1), except the result has shape (..., M) + adotx = matmul(a, x[..., None])[..., 0] + assert_almost_equal(np.broadcast_to(b, adotx.shape), adotx) + else: + adotx = matmul(a, x) + assert_almost_equal(b, adotx) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_1_d(self): + class ArraySubclass(np.ndarray): + pass + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2) + + # If b is anything other than 1-D it should be treated as a stack of + # matrices + b = np.arange(4).reshape(2, 2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2, 2) + + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + assert_raises(ValueError, linalg.solve, a, b) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. 
+ expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(matmul(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + res = linalg.eig(a) + eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors + assert_allclose(matmul(a, eigenvectors), + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + rtol=get_rtol(eigenvalues.dtype)) + assert_(consistent_subclass(eigenvectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + 
assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDBaseTests: + hermitian = False + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + res = linalg.svd(x) + U, S, Vh = res.U, res.S, res.Vh + assert_equal(U.dtype, dtype) + assert_equal(S.dtype, get_real_dtype(dtype)) + assert_equal(Vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + assert_equal(s.dtype, get_real_dtype(dtype)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases, SVDBaseTests): + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + def test_svdvals(self): + x = np.array([[1, 0.5], [0.5, 1]]) + s_from_svd = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + s_from_svdvals = linalg.svdvals(x) + assert_almost_equal(s_from_svd, s_from_svdvals) + + +class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False, hermitian=True) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + def hermitian(mat): + axes = list(range(mat.ndim)) + axes[-1], axes[-2] = axes[-2], axes[-1] + return np.conj(np.transpose(mat, axes=axes)) + + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_equal(np.sort(s)[..., ::-1], s) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): + hermitian = True + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + 
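+        # (editorial note: for p in {1, -1, inf, -inf}, cond(a, p) equals
+        #  norm(a, p) * norm(inv(a), p); the expressions below spell the
+        #  1-norm out as the max absolute column sum and the inf-norm as the
+        #  max absolute row sum, with min instead of max for the negative
+        #  orders)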
assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + def test_basic_nonsvd(self): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2/3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions + As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float64)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1,0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0,0] = 0 + A[1,1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0,0], np.inf) + assert_equal(c[1,1], np.inf) + assert_(np.isfinite(c[0,1])) + assert_(np.isfinite(c[1,0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a, hermitian=True) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinvHermitian(PinvHermitianCases): + pass + + +def test_pinv_rtol_arg(): + a = np.array([[1, 2, 3], [4, 1, 1], [2, 3, 1]]) + + assert_almost_equal( + 
np.linalg.pinv(a, rcond=0.5), + np.linalg.pinv(a, rtol=0.5), + ) + + with pytest.raises( + ValueError, match=r"`rtol` and `rcond` can't be both set." + ): + np.linalg.pinv(a, rcond=0.5, rtol=0.5) + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + res = linalg.slogdet(a) + s, ld = res.sign, res.logabsdet + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) 
+ assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, False) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 3) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower: + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]]*2) + #FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in 
self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
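+        # Aside (added commentary): eigvalsh reads only one triangle of
+        # the input, 'L' (the default) or 'U', and returns real eigenvalues
+        # in ascending order, whereas eig guarantees no order, hence the
+        # sort below. A minimal sketch of the sorted contract:
+        # >>> np.linalg.eigvalsh(np.array([[2., 1.], [1., 2.]]))
+        # array([1., 3.])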
+ ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
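+        # Aside (added commentary): eigh returns a namedtuple of
+        # eigenvalues w and orthonormal eigenvectors v with a @ v equal
+        # to v * w column-wise, which is what the matmul checks below
+        # verify. Minimal sketch:
+        # >>> w, v = np.linalg.eigh(np.array([[2., 1.], [1., 2.]]))
+        # >>> np.allclose(np.array([[2., 1.], [1., 2.]]) @ v, v * w)
+        # True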
+ res = linalg.eigh(a) + ev, evc = res.eigenvalues, res.eigenvectors + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(matmul(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(matmul(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase: + dt = None + dec = None + + @staticmethod + def check_dtype(x, res): + if issubclass(x.dtype.type, np.inexact): + assert_equal(res.dtype, x.real.dtype) + else: + # For integer input, don't have to test float precision of output. 
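+            # Aside (added commentary): for inexact input, norm keeps the
+            # matching real precision (e.g. complex64 -> float32), which
+            # the branch above asserts; integer input merely has to come
+            # out as some floating type. Sketch:
+            # >>> np.linalg.norm(np.ones(3, dtype=np.complex64)).dtype
+            # dtype('float32')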
+ assert_(issubclass(res.dtype.type, np.floating)) + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + self.check_dtype(at, an) + assert_almost_equal(an, 2) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) + + an = norm(at, 4) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. 
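+                    # Added explanatory note: with both axes normalized to
+                    # be non-negative, the remaining (stack) axis is the one
+                    # not in {row_axis, col_axis}; since 0 + 1 + 2 == 3 == nd
+                    # holds only for nd == 3, recovering it as
+                    # nd - (row_axis + col_axis) is a 3-d-only trick, which
+                    # is exactly what the comment above warns about.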
+ k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, 3.0**(1.0/2.0)) + + an = norm(at, -2) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + self.check_dtype(at, an) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
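+            # Added explanatory note: the nuclear norm is the sum of the
+            # singular values; here at @ at.T == [[2, 1], [1, 2]] has
+            # eigenvalues 3 and 1, so the expected value below is
+            # sqrt(3) + 1 ~= 2.7320508.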
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` or any other string raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + assert_raises(ValueError, norm, [3, 4], 'test', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(AxisError, norm, B, None, 3) + assert_raises(AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic: + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. 
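+        # Added explanatory note: in two's-complement int32,
+        # abs(-2**31) overflows back to -2**31, so the absolute value
+        # must be taken after the cast to float; the correct order gives
+        # abs(float(-2**31)) == 2.0**31, the expected norm below.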
+ x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank: + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4,4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + with assert_raises_regex( + ValueError, "`tol` and `rtol` can\'t be both set." + ): + matrix_rank(I, tol=0.01, rtol=0.01) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR: + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
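+        # Aside (added commentary): with k = min(m, n), 'complete' yields
+        # Q (m, m) and R (m, n), 'reduced' yields Q (m, k) and R (k, n),
+        # and 'r' returns only R; in all cases Q has orthonormal columns
+        # and R is upper triangular, so Q @ R reconstructs `a`. Sketch:
+        # >>> q, r = np.linalg.qr(np.ones((4, 3)), mode='reduced')
+        # >>> q.shape, r.shape
+        # ((4, 3), (3, 3))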
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + res = linalg.qr(a, mode='complete') + Q, R = res.Q, res.R + assert_(Q.dtype == a_dtype) + assert_(R.dtype == a_dtype) + assert_(isinstance(Q, a_type)) + assert_(isinstance(R, a_type)) + assert_(Q.shape == (m, m)) + assert_(R.shape == (m, n)) + assert_almost_equal(dot(Q, R), a) + assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m)) + assert_almost_equal(np.triu(R), R) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + def check_qr_stacked(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
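+        # Aside (added commentary): qr acts as a generalized ufunc over
+        # the last two axes, so any leading "stack" dimensions broadcast
+        # through unchanged. Sketch, assuming the default 'reduced' mode:
+        # >>> q, r = np.linalg.qr(np.ones((2, 3, 4, 3)))
+        # >>> q.shape, r.shape
+        # ((2, 3, 4, 3), (2, 3, 3, 3))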
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape[-2:] + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape[-2:] == (m, m)) + assert_(r.shape[-2:] == (m, n)) + assert_almost_equal(matmul(q, r), a) + I_mat = np.identity(q.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q.shape[:-2] + (q.shape[-1],)*2) + assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) + assert_almost_equal(np.triu(r[..., :, :]), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape[-2:] == (m, k)) + assert_(r1.shape[-2:] == (k, n)) + assert_almost_equal(matmul(q1, r1), a) + I_mat = np.identity(q1.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q1.shape[:-2] + (q1.shape[-1],)*2) + assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), + stack_I_mat) + assert_almost_equal(np.triu(r1[..., :, :]), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize("size", [ + (3, 4), (4, 3), (4, 4), + (3, 0), (0, 3)]) + @pytest.mark.parametrize("outer_size", [ + (2, 2), (2,), (2, 3, 4)]) + @pytest.mark.parametrize("dt", [ + np.single, np.double, + np.csingle, np.cdouble]) + def test_stacked_inputs(self, outer_size, size, dt): + + rng = np.random.default_rng(123) + A = rng.normal(size=outer_size + size).astype(dt) + B = rng.normal(size=outer_size + size).astype(dt) + self.check_qr_stacked(A) + self.check_qr_stacked(A + 1.j*B) + + +class TestCholesky: + + @pytest.mark.parametrize( + 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + ) + @pytest.mark.parametrize( + 'dtype', (np.float32, np.float64, np.complex64, np.complex128) + ) + @pytest.mark.parametrize( + 'upper', [False, True]) + def test_basic_property(self, shape, dtype, upper): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j*np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a, upper=upper) + + # Check A = L L^H or A = U^H U + if upper: + b = np.matmul(c.transpose(t).conj(), c) + else: + b = np.matmul(c, c.transpose(t).conj()) + with np._no_nep50_warning(): + atol = 500 * a.shape[0] * np.finfo(dtype).eps + assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') + + # Check diag(L or U) is real and positive + d = np.diagonal(c, axis1=-2, axis2=-1) + assert_(np.all(np.isreal(d))) + assert_(np.all(d >= 0)) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + def test_upper_lower_arg(self): + # Explicit test of upper argument that also checks the default. 
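+        # Added explanatory note: for Hermitian positive-definite input,
+        # cholesky(a) returns lower-triangular L with a == L @ L.conj().T,
+        # and cholesky(a, upper=True) returns U == L.conj().T with
+        # a == U.conj().T @ U; the two assertions below check exactly
+        # this relationship and the lower-triangular default.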
+ a = np.array([[1+0j, 0-2j], [0+2j, 5+0j]]) + + assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False)) + + assert_equal( + linalg.cholesky(a, upper=True), + linalg.cholesky(a).T.conj() + ) + + +class TestOuter: + arr1 = np.arange(3) + arr2 = np.arange(3) + expected = np.array( + [[0, 0, 0], + [0, 1, 2], + [0, 2, 4]] + ) + + assert_array_equal(np.linalg.outer(arr1, arr2), expected) + + with assert_raises_regex( + ValueError, "Input arrays must be one-dimensional" + ): + np.linalg.outer(arr1[:, np.newaxis], arr2) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.view(arr.dtype.newbyteorder(native)) + sw_arr = arr.view(arr.dtype.newbyteorder("S")).byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. + os._exit(os.EX_CONFIG) + else: + # parent + pid, status = os.wait() + if os.WEXITSTATUS(status) != XERBLA_OK: + pytest.skip('Numpy xerbla not linked in.') + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +def test_sdot_bug_8577(): + # Regression test that loading certain other libraries does not + # result to wrong results in float32 linear algebra. + # + # There's a bug gh-8577 on OSX that can trigger this, and perhaps + # there are also other situations in which it occurs. + # + # Do the check in a separate process. 
+ + bad_libs = ['PyQt5.QtWidgets', 'IPython'] + + template = textwrap.dedent(""" + import sys + {before} + try: + import {bad_lib} + except ImportError: + sys.exit(0) + {after} + x = np.ones(2, dtype=np.float32) + sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) + """) + + for bad_lib in bad_libs: + code = template.format(before="import numpy as np", after="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + # Swapped import order + code = template.format(after="import numpy as np", before="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + +class TestMultiDot: + + def test_basic_function_with_three_arguments(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) + assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) + + def test_basic_function_with_two_arguments(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + + assert_almost_equal(multi_dot([A, B]), A.dot(B)) + assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) + + def test_basic_function_with_dynamic_programming_optimization(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) + + def test_vector_as_first_argument(self): + # The first argument can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 2)) + + # the result should be 1-D + assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) + + def test_vector_as_last_argument(self): + # The last argument can be 1-D + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be 1-D + assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) + + def test_vector_as_first_and_last_argument(self): + # The first and last arguments can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be a scalar + assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) + + def test_three_arguments_and_out(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. 
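+        # Added worked example of why ordering matters for these shapes,
+        # counting m*k*n scalar multiplications per matmul: with A (6, 2),
+        # B (2, 6) and C (6, 2), (A @ B) @ C costs 6*2*6 + 6*6*2 = 144,
+        # while A @ (B @ C) costs 2*6*2 + 6*2*2 = 48, so multi_dot should
+        # choose the right-to-left grouping here.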
+ A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + out = np.zeros((6, 2)) + ret = multi_dot([A, B, C], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C)) + assert_almost_equal(out, np.dot(A, np.dot(B, C))) + + def test_two_arguments_and_out(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + out = np.zeros((6, 6)) + ret = multi_dot([A, B], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B)) + assert_almost_equal(out, np.dot(A, B)) + + def test_dynamic_programming_optimization_and_out(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate test + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + out = np.zeros((6, 1)) + ret = multi_dot([A, B, C, D], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C).dot(D)) + + def test_dynamic_programming_logic(self): + # Test for the dynamic programming part + # This test is directly taken from Cormen page 376. + arrays = [np.random.random((30, 35)), + np.random.random((35, 15)), + np.random.random((15, 5)), + np.random.random((5, 10)), + np.random.random((10, 20)), + np.random.random((20, 25))] + m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], + [0., 0., 2625., 4375., 7125., 10500.], + [0., 0., 0., 750., 2500., 5375.], + [0., 0., 0., 0., 1000., 3500.], + [0., 0., 0., 0., 0., 5000.], + [0., 0., 0., 0., 0., 0.]]) + s_expected = np.array([[0, 1, 1, 3, 3, 3], + [0, 0, 2, 3, 3, 3], + [0, 0, 0, 3, 3, 3], + [0, 0, 0, 0, 4, 5], + [0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0]], dtype=int) + s_expected -= 1 # Cormen uses 1-based index, python does not. + + s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) + + # Only the upper triangular part (without the diagonal) is interesting. 
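+        # Added explanatory note: m[i, j] is the minimal number of scalar
+        # multiplications needed to chain matrices i..j, computed by the
+        # recurrence m[i, j] = min_k(m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1])
+        # over splits i <= k < j, where p lists the chain dimensions;
+        # s[i, j] records the argmin split, and only i < j is meaningful,
+        # hence the upper-triangle comparison below.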
+ assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv: + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24) + a.shape = shape + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24) + a.shape = (4, 6, 8, 3) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24) + a.shape = (24, 8, 3) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + + +class TestTensorsolve: + + @pytest.mark.parametrize("a, axes", [ + (np.ones((4, 6, 8, 2)), None), + (np.ones((3, 3, 2)), (0, 2)), + ]) + def test_non_square_handling(self, a, axes): + with assert_raises(LinAlgError): + b = np.ones(a.shape[:2]) + linalg.tensorsolve(a, b, axes=axes) + + @pytest.mark.parametrize("shape", + [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], + ) + def test_tensorsolve_result(self, shape): + a = np.random.randn(*shape) + b = np.ones(a.shape[:2]) + x = np.linalg.tensorsolve(a, b) + assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) + + +def test_unsupported_commontype(): + # linalg gracefully handles unsupported type + arr = np.array([[1, -2], [2, 5]], dtype='float16') + with assert_raises_regex(TypeError, "unsupported in linalg"): + linalg.cholesky(arr) + + +#@pytest.mark.slow +#@pytest.mark.xfail(not HAS_LAPACK64, run=False, +# reason="Numpy not compiled with 64-bit BLAS/LAPACK") +#@requires_memory(free_bytes=16e9) +@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") +def test_blas64_dot(): + n = 2**32 + a = np.zeros([1, n], dtype=np.float32) + b = np.ones([1, 1], dtype=np.float32) + a[0,-1] = 1 + c = np.dot(b, a) + assert_equal(c[0,-1], 1) + + +@pytest.mark.xfail(not HAS_LAPACK64, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +def test_blas64_geqrf_lwork_smoketest(): + # Smoke test LAPACK geqrf lwork call with 64-bit integers + dtype = np.float64 + lapack_routine = np.linalg.lapack_lite.dgeqrf + + m = 2**32 + 1 + n = 2**32 + 1 + lda = m + + # Dummy arrays, not referenced by the lapack routine, so don't + # need to be of the right size + a = np.zeros([1, 1], dtype=dtype) + work = np.zeros([1], dtype=dtype) + tau = np.zeros([1], dtype=dtype) + + # Size query + results = lapack_routine(m, n, a, lda, tau, work, -1, 0) + assert_equal(results['info'], 0) + assert_equal(results['m'], m) + assert_equal(results['n'], m) + + # Should result to an integer of a reasonable size + lwork = int(work.item()) + assert_(2**32 < lwork < 2**42) + + +def test_diagonal(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `diagonal` is tested in `test_multiarray.py`. 
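+    # Aside (added commentary): unlike np.diagonal, whose default axes are
+    # the first two, np.linalg.diagonal follows the Array API and always
+    # takes the diagonal over the last two axes. Sketch:
+    # >>> np.linalg.diagonal(np.arange(60).reshape(3, 4, 5)).shape
+    # (3, 4)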
+ x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.diagonal(x) + expected = np.array( + [ + [0, 6, 12, 18], + [20, 26, 32, 38], + [40, 46, 52, 58], + ] + ) + assert_equal(actual, expected) + + +def test_trace(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `trace` is tested in `test_multiarray.py`. + x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.trace(x) + expected = np.array([36, 116, 196]) + + assert_equal(actual, expected) + + +def test_cross(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.cross(x, x + 1) + expected = np.array([ + [-1, 2, -1], + [-1, 2, -1], + [-1, 2, -1], + ]) + + assert_equal(actual, expected) + + # We test that lists are converted to arrays. + u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, + r"input arrays must be \(arrays of\) 3-dimensional vectors" + ): + x_2dim = x[:, 1:] + np.linalg.cross(x_2dim, x_2dim) + + +def test_tensordot(): + # np.linalg.tensordot is just an alias for np.tensordot + x = np.arange(6).reshape((2, 3)) + + assert np.linalg.tensordot(x, x) == 55 + assert np.linalg.tensordot(x, x, axes=[(0, 1), (0, 1)]) == 55 + + +def test_matmul(): + # np.linalg.matmul and np.matmul only differs in the number + # of arguments in the signature + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matmul(x, x.T) + expected = np.array([[5, 14], [14, 50]]) + + assert_equal(actual, expected) + + +def test_matrix_transpose(): + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matrix_transpose(x) + expected = x.T + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, "array must be at least 2-dimensional" + ): + np.linalg.matrix_transpose(x[:, 0]) + + +def test_matrix_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.matrix_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.matrix_norm(x, keepdims=True) + + assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3) + + +def test_vector_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.vector_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.vector_norm(x, axis=0) + + assert_almost_equal( + actual, np.array([6.7082, 8.124, 9.6436]), double_decimal=3 + ) + + actual = np.linalg.vector_norm(x, keepdims=True) + expected = np.full((1, 1), 14.2828, dtype='float64') + assert_equal(actual.shape, expected.shape) + assert_almost_equal(actual, expected, double_decimal=3) diff --git a/phivenv/Lib/site-packages/numpy/linalg/tests/test_regression.py b/phivenv/Lib/site-packages/numpy/linalg/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..c94e95d5a795f246c19d5f650801473d6768113a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/linalg/tests/test_regression.py @@ -0,0 +1,178 @@ +""" Test functions for linalg module +""" +import warnings + +import pytest + +import numpy as np +from numpy import linalg, arange, float64, array, dot, transpose +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_array_equal, + assert_array_almost_equal, assert_array_less +) + + +class TestRegression: + + def test_eig_build(self): + # Ticket #652 + rva = array([1.03221168e+02 + 0.j, + -1.91843603e+01 + 0.j, + -6.04004526e-01 + 15.84422474j, + -6.04004526e-01 - 15.84422474j, + -1.13692929e+01 + 
0.j, + -6.57612485e-01 + 10.41755503j, + -6.57612485e-01 - 10.41755503j, + 1.82126812e+01 + 0.j, + 1.06011014e+01 + 0.j, + 7.80732773e+00 + 0.j, + -7.65390898e-01 + 0.j, + 1.51971555e-15 + 0.j, + -1.51308713e-15 + 0.j]) + a = arange(13 * 13, dtype=float64) + a.shape = (13, 13) + a = a % 17 + va, ve = linalg.eig(a) + va.sort() + rva.sort() + assert_array_almost_equal(va, rva) + + def test_eigh_build(self): + # Ticket 662. + rvals = [68.60568999, 89.57756725, 106.67185574] + + cov = array([[77.70273908, 3.51489954, 15.64602427], + [3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) + + vals, vecs = linalg.eigh(cov) + assert_array_almost_equal(vals, rvals) + + def test_svd_build(self): + # Ticket 627. + a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]]) + m, n = a.shape + u, s, vh = linalg.svd(a) + + b = dot(transpose(u[:, n:]), a) + + assert_array_almost_equal(b, np.zeros((2, 2))) + + def test_norm_vector_badarg(self): + # Regression for #786: Frobenius norm for vectors raises + # ValueError. + assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') + + def test_lapack_endian(self): + # For bug #1482 + a = array([[5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') + b = array(a, dtype=' 0.5) + assert_equal(c, 1) + assert_equal(np.linalg.matrix_rank(a), 1) + assert_array_less(1, np.linalg.norm(a, ord=2)) + + w_svdvals = linalg.svdvals(a) + assert_array_almost_equal(w, w_svdvals) + + def test_norm_object_array(self): + # gh-7575 + testvector = np.array([np.array([0, 1]), 0, 0], dtype=object) + + norm = linalg.norm(testvector) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + norm = linalg.norm(testvector, ord=1) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype != np.dtype('float64')) + + norm = linalg.norm(testvector, ord=2) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + assert_raises(ValueError, linalg.norm, testvector, ord='fro') + assert_raises(ValueError, linalg.norm, testvector, ord='nuc') + assert_raises(ValueError, linalg.norm, testvector, ord=np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord=0) + assert_raises(ValueError, linalg.norm, testvector, ord=-1) + assert_raises(ValueError, linalg.norm, testvector, ord=-2) + + testmatrix = np.array([[np.array([0, 1]), 0, 0], + [0, 0, 0]], dtype=object) + + norm = linalg.norm(testmatrix) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + norm = linalg.norm(testmatrix, ord='fro') + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc') + assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=0) + assert_raises(ValueError, linalg.norm, testmatrix, ord=1) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-1) + assert_raises(TypeError, linalg.norm, testmatrix, ord=2) + assert_raises(TypeError, linalg.norm, testmatrix, ord=-2) + assert_raises(ValueError, linalg.norm, testmatrix, ord=3) + + def test_lstsq_complex_larger_rhs(self): + # gh-9891 + size = 20 + n_rhs = 70 + G = np.random.randn(size, size) + 1j * np.random.randn(size, size) + u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs) + b = G.dot(u) + # This should work without segmentation fault. 
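+        # Added explanatory note: gh-9891 was a crash, not a wrong result,
+        # when the number of right-hand sides exceeded the matrix size for
+        # complex input; since G is square and (almost surely) invertible,
+        # the least-squares solution must reproduce u, which the check
+        # below relies on.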
+        u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
+        # check results just in case
+        assert_array_almost_equal(u_lstsq, u)
+
+    @pytest.mark.parametrize("upper", [True, False])
+    def test_cholesky_empty_array(self, upper):
+        # gh-25840 - upper=True hung before.
+        res = np.linalg.cholesky(np.zeros((0, 0)), upper=upper)
+        assert res.size == 0
+
+    @pytest.mark.parametrize("rtol", [0.0, [0.0] * 4, np.zeros((4,))])
+    def test_matrix_rank_rtol_argument(self, rtol):
+        # gh-25877
+        x = np.zeros((4, 3, 2))
+        res = np.linalg.matrix_rank(x, rtol=rtol)
+        assert res.shape == (4,)
+
+    def test_openblas_threading(self):
+        # gh-27036
+        # Test whether matrix multiplication involving a large matrix always
+        # gives the same (correct) answer.
+        x = np.arange(500000, dtype=np.float64)
+        src = np.vstack((x, -10*x)).T
+        matrix = np.array([[0, 1], [1, 0]])
+        expected = np.vstack((-10*x, x)).T  # src @ matrix
+        for i in range(200):
+            result = src @ matrix
+            mismatches = (~np.isclose(result, expected)).sum()
+            if mismatches != 0:
+                assert False, ("unexpected result from matmul, "
+                               "probably due to OpenBLAS threading issues")
diff --git a/phivenv/Lib/site-packages/numpy/ma/API_CHANGES.txt b/phivenv/Lib/site-packages/numpy/ma/API_CHANGES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..48c420ed33554f657ec5a0e2d63635aa1b97f061
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/ma/API_CHANGES.txt
@@ -0,0 +1,135 @@
+.. -*- rest -*-
+
+==================================================
+API changes in the new masked array implementation
+==================================================
+
+Masked arrays are subclasses of ndarray
+---------------------------------------
+
+Contrary to the original implementation, masked arrays are now regular
+ndarrays::
+
+    >>> x = masked_array([1,2,3],mask=[0,0,1])
+    >>> print isinstance(x, numpy.ndarray)
+    True
+
+
+``_data`` returns a view of the masked array
+--------------------------------------------
+
+Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the
+``_data`` part will return a regular ndarray or any of its subclasses,
+depending on the initial data::
+
+    >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
+    >>> print x._data
+    [[1 2]
+     [3 4]]
+    >>> print type(x._data)
+    <class 'numpy.matrix'>
+
+
+In practice, ``_data`` is implemented as a property, not as an attribute.
+Each access therefore returns a fresh view rather than the same object, and
+simple identity tests such as the following one will fail::
+
+    >>> x._data is x._data
+    False
+
+
+``filled(x)`` can return a subclass of ndarray
+----------------------------------------------
+The function ``filled(a)`` returns an array of the same type as ``a._data``::
+
+    >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
+    >>> y = filled(x)
+    >>> print type(y)
+    <class 'numpy.matrix'>
+    >>> print y
+    matrix([[ 1, 2],
+            [ 3, 999999]])
+
+
+``put``, ``putmask`` behave like their ndarray counterparts
+-----------------------------------------------------------
+
+Previously, ``putmask`` was used like this::
+
+    mask = [False,True,True]
+    x = array([1,4,7],mask=mask)
+    putmask(x,mask,[3])
+
+which translated to::
+
+    x[~mask] = [3]
+
+(Note that a ``True``-value in a mask suppresses a value.)
+
+In other words, the mask had the same length as ``x``, whereas
+``values`` had ``sum(~mask)`` elements.
+
+Now, the behaviour is similar to that of ``ndarray.putmask``, where
+the mask and the values are both the same length as ``x``, i.e.
+
+::
+
+    putmask(x,mask,[3,0,0])
+
+
+``fill_value`` is a property
+----------------------------
+
+``fill_value`` is no longer a method, but a property::
+
+    >>> print x.fill_value
+    999999
+
+``cumsum`` and ``cumprod`` ignore missing values
+------------------------------------------------
+
+Missing values are assumed to be the identity element of the operation,
+i.e. 0 for ``cumsum`` and 1 for ``cumprod``::
+
+    >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False])
+    >>> print x
+    [1 -- 3 4]
+    >>> print x.cumsum()
+    [1 -- 4 8]
+    >>> print x.cumprod()
+    [1 -- 3 12]
+
+``bool(x)`` raises a ValueError
+-------------------------------
+
+Masked arrays now behave like regular ``ndarrays``, in that they cannot be
+converted to booleans:
+
+::
+
+    >>> x = N.ma.array([1,2,3])
+    >>> bool(x)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
+
+
+==================================
+New features (non-exhaustive list)
+==================================
+
+``mr_``
+-------
+
+``mr_`` mimics the behavior of ``r_`` for masked arrays::
+
+    >>> np.ma.mr_[3,4,5]
+    masked_array(data = [3 4 5],
+                 mask = False,
+           fill_value=999999)
+
+
+``anom``
+--------
+
+The ``anom`` method returns the deviations from the average (anomalies).
diff --git a/phivenv/Lib/site-packages/numpy/ma/LICENSE b/phivenv/Lib/site-packages/numpy/ma/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f165a0f6dbf57b89e2f6e23b9f042750dde3caab
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/ma/LICENSE
@@ -0,0 +1,24 @@
+* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant
+* All rights reserved.
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in the
+*       documentation and/or other materials provided with the distribution.
+*     * Neither the name of the University of Georgia nor the
+*       names of its contributors may be used to endorse or promote products
+*       derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/phivenv/Lib/site-packages/numpy/ma/README.rst b/phivenv/Lib/site-packages/numpy/ma/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0f39221be7fbcf95ecce6b296cbd16c44ac241e1
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/ma/README.rst
@@ -0,0 +1,236 @@
+==================================
+A guide to masked arrays in NumPy
+==================================
+
+.. Contents::
+
+See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
+for updates of this document.
+
+
+History
+-------
+
+As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became
+increasingly frustrated with the subclassing of masked arrays (even if
+I can only blame my inexperience). I needed to develop a class of arrays
+that could store some additional information along with numerical values,
+while keeping the possibility for missing data (picture storing a series
+of dates along with measurements, what would later become the `TimeSeries
+Scikit <http://projects.scipy.org/scipy/scikits/wiki/TimeSeries>`__
+(dead link)).
+
+I started to implement such a class, but then quickly realized that
+any additional information disappeared when processing these subarrays
+(for example, adding a constant value to a subarray would erase its
+dates). I ended up writing the equivalent of *numpy.core.ma* for my
+particular class, ufuncs included. Everything went fine until I needed to
+subclass my new class, when more problems showed up: some attributes of
+the new subclass were lost during processing. I identified the culprit as
+MaskedArray, which returns masked ndarrays when I expected masked
+arrays of my class. I was preparing myself to rewrite *numpy.core.ma*
+when I forced myself to learn how to subclass ndarrays. As I became more
+familiar with the *__new__* and *__array_finalize__* methods,
+I started to wonder why masked arrays were objects, and not ndarrays,
+and whether it wouldn't be more convenient for subclassing if they did
+behave like regular ndarrays.
+
+The new *maskedarray* is what I eventually came up with. The
+main differences with the initial *numpy.core.ma* package are
+that MaskedArray is now a subclass of *ndarray* and that the
+*_data* section can now be any subclass of *ndarray*. Apart from a
+couple of issues listed below, the behavior of the new MaskedArray
+class reproduces the old one. Initially the *maskedarray*
+implementation was marginally slower than *numpy.ma* in some areas,
+but work is underway to speed it up; the expectation is that it can be
+made substantially faster than the present *numpy.ma*.
+
+
+Note that if the subclass has some special methods and
+attributes, they are not propagated to the masked version:
+this would require a modification of the *__getattribute__*
+method (first trying *ndarray.__getattribute__*, then trying
+*self._data.__getattribute__* if an exception is raised in the first
+place), which really slows things down.
+
+Main differences
+----------------
+
+ * The *_data* part of the masked array can be any subclass of ndarray (but not recarray, cf. below).
+ * *fill_value* is now a property, not a function.
+ * in the majority of cases, the mask is forced to *nomask* when no value is actually masked. A notable exception is when a masked array (with no masked values) has just been unpickled.
+ * I got rid of the *share_mask* flag; I never understood its purpose.
+ * *put*, *putmask* and *take* now mimic the ndarray methods, to avoid unpleasant surprises. Moreover, *put* and *putmask* both update the mask when needed.
* if *a* is a masked array, *bool(a)* raises a *ValueError*, as it does with ndarrays. + * in the same way, the comparison of two masked arrays is a masked array, not a boolean. + * *filled(a)* returns an array of the same subclass as *a._data*, and no test is performed on whether it is contiguous or not. + * the mask is always printed, even if it's *nomask*, which makes things easy (for me at least) to remember that a masked array is used. + * *cumsum* works as if the *_data* array was filled with 0. The mask is preserved, but not updated. + * *cumprod* works as if the *_data* array was filled with 1. The mask is preserved, but not updated. + +New features +------------ + +This list is non-exhaustive... + + * the *mr_* function mimics *r_* for masked arrays. + * the *anom* method returns the anomalies (deviations from the average). + +Using the new package with numpy.core.ma +---------------------------------------- + +I tried to make sure that the new package can understand old masked +arrays. Unfortunately, there's no upward compatibility. + +For example: + +>>> import numpy.core.ma as old_ma +>>> import maskedarray as new_ma +>>> x = old_ma.array([1,2,3,4,5], mask=[0,0,1,0,0]) +>>> x +array(data = + [ 1 2 999999 4 5], + mask = + [False False True False False], + fill_value=999999) +>>> y = new_ma.array([1,2,3,4,5], mask=[0,0,1,0,0]) +>>> y +array(data = [1 2 -- 4 5], + mask = [False False True False False], + fill_value=999999) +>>> x==y +array(data = + [True True True True True], + mask = + [False False True False False], + fill_value=?) +>>> old_ma.getmask(x) == new_ma.getmask(x) +array([True, True, True, True, True]) +>>> old_ma.getmask(y) == new_ma.getmask(y) +array([True, True, False, True, True]) +>>> old_ma.getmask(y) +False + + +Using maskedarray with matplotlib +--------------------------------- + +Starting with matplotlib 0.91.2, masked array importing will work with +the maskedarray branch as well as with earlier versions. + +By default matplotlib still uses numpy.ma, but there is an rcParams setting +that you can use to select maskedarray instead. In the matplotlibrc file +you will find:: + + #maskedarray : False # True to use external maskedarray module + # instead of numpy.ma; this is a temporary + # setting for testing maskedarray. + + +Uncomment and set to True to select maskedarray everywhere. +Alternatively, you can test a script with maskedarray by using a +command-line option, e.g.:: + + python simple_plot.py --maskedarray + + +Masked records +-------------- + +Like *numpy.ma.core*, the *ndarray*-based implementation +of MaskedArray is limited when working with records: you can +mask any record of the array, but not a field in a record. If you +need this feature, you may want to give the *mrecords* package +a try (available in the *maskedarray* directory in the scipy +sandbox). This module defines a new class, *MaskedRecord*. An +instance of this class accepts a *recarray* as data, and uses two +masks: the *fieldmask* has as many entries as records in the array, +each entry with the same fields as a record, but of boolean types: +they indicate whether the field is masked or not; a record entry +is flagged as masked in the *mask* array if all the fields are +masked. A few examples in the file should give you an idea of what +can be done. Note that *mrecords* is still experimental... +Optimizing maskedarray +---------------------- + +Should masked arrays be filled before processing or not?
+-------------------------------------------------------- + +In the current implementation, most operations on masked arrays involve +the following steps: + + * the input arrays are filled + * the operation is performed on the filled arrays + * the mask is set for the results, from the combination of the input masks and the mask corresponding to the domain of the operation. + +For example, consider the division of two masked arrays:: + + import numpy + import maskedarray as ma + x = ma.array([1,2,3,4], mask=[1,0,0,0], dtype=numpy.float64) + y = ma.array([-1,0,1,2], mask=[0,0,0,1], dtype=numpy.float64) + +The division of x by y is then computed as:: + + d1 = x.filled(0) # d1 = array([0., 2., 3., 4.]) + d2 = y.filled(1) # array([-1., 0., 1., 1.]) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m = array([True,False,False,True]) + dm = ma.divide.domain(d1,d2) # array([False, True, False, False]) + result = (d1/d2).view(MaskedArray) # masked_array([-0., inf, 3., 4.]) + result._mask = logical_or(m, dm) + +Note that a division by zero takes place. To avoid it, we can consider +filling the input arrays, taking the domain mask into account, so that:: + + d1 = x._data.copy() # d1 = array([1., 2., 3., 4.]) + d2 = y._data.copy() # array([-1., 0., 1., 2.]) + dm = ma.divide.domain(d1,d2) # array([False, True, False, False]) + numpy.putmask(d2, dm, 1) # d2 = array([-1., 1., 1., 2.]) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m = array([True,False,False,True]) + result = (d1/d2).view(MaskedArray) # masked_array([-1., 2., 3., 2.]) + result._mask = logical_or(m, dm) + +Note that the *.copy()* is required to avoid updating the inputs with +*putmask*. The *.filled()* method also involves a *.copy()*. + +A third possibility consists in not filling the arrays at all:: + + d1 = x._data # d1 = array([1., 2., 3., 4.]) + d2 = y._data # array([-1., 0., 1., 2.]) + dm = ma.divide.domain(d1,d2) # array([False, True, False, False]) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m = array([True,False,False,True]) + result = (d1/d2).view(MaskedArray) # masked_array([-1., inf, 3., 2.]) + result._mask = logical_or(m, dm) + +Note that here again the division by zero takes place. + +A quick benchmark gives the following results: + + * *numpy.ma.divide* : 2.69 ms per loop + * classical division : 2.21 ms per loop + * division w/ prefilling : 2.34 ms per loop + * division w/o filling : 1.55 ms per loop + +So, is it worth filling the arrays beforehand? Yes, if we are interested +in avoiding floating-point exceptions that may fill the result with infs +and nans. No, if we are only interested in speed... + + +Thanks +------ + +I'd like to thank Paul Dubois, Travis Oliphant and Sasha for the +original masked array package: without you, I would never have started +that (it might be argued that I shouldn't have anyway, but that's +another story...). I also wish to extend these thanks to Reggie Dugard +and Eric Firing for their suggestions and numerous improvements. + + +Revision notes +-------------- + + * 08/25/2007 : Creation of this page + * 01/23/2007 : The package has been moved to the SciPy sandbox, and is regularly updated: please check out your SVN version!
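To reproduce the prefilling strategy benchmarked above with today's ``numpy.ma`` (the standalone *maskedarray* package has since been merged into numpy), here is a minimal sketch; ``(d2 == 0)`` stands in for the internal ``ma.divide.domain`` check, and ``errstate(divide='raise')`` demonstrates that no floating-point exception is triggered::

    import numpy as np
    import numpy.ma as ma

    x = ma.array([1., 2., 3., 4.], mask=[1, 0, 0, 0])
    y = ma.array([-1., 0., 1., 2.], mask=[0, 0, 0, 1])

    d1 = x.data.copy()           # array([ 1.,  2.,  3.,  4.])
    d2 = y.data.copy()           # array([-1.,  0.,  1.,  2.])
    dm = (d2 == 0)               # domain mask: [False, True, False, False]
    np.putmask(d2, dm, 1.0)      # prefill: d2 = array([-1., 1., 1., 2.])
    m = ma.mask_or(ma.getmask(x), ma.getmask(y))

    with np.errstate(divide='raise'):   # would raise if 0-division happened
        result = ma.array(d1 / d2, mask=np.logical_or(m, dm))

    print(result)                # [-- -- 3.0 --]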
diff --git a/phivenv/Lib/site-packages/numpy/ma/__init__.py b/phivenv/Lib/site-packages/numpy/ma/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b666aaeb6889a369b1bba08fae97d489a5e4c3a4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/__init__.py @@ -0,0 +1,54 @@ +""" +============= +Masked Arrays +============= + +Arrays sometimes contain invalid or missing data. When doing operations +on such arrays, we wish to suppress invalid values, which is the purpose masked +arrays fulfill (an example of typical use is given below). + +For example, examine the following array: + +>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) + +When we try to calculate the mean of the data, the result is undetermined: + +>>> np.mean(x) +nan + +The mean is calculated using roughly ``np.sum(x)/len(x)``, but since +any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter +masked arrays: + +>>> m = np.ma.masked_array(x, np.isnan(x)) +>>> m +masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], + mask = [False False False True False False False True], + fill_value=1e+20) + +Here, we construct a masked array that suppresses all ``NaN`` values. We +may now proceed to calculate the mean of the other values: + +>>> np.mean(m) +2.6666666666666665 + +.. [1] Not-a-Number, a floating point value that is the result of an + invalid operation. + +.. moduleauthor:: Pierre Gerard-Marchant +.. moduleauthor:: Jarrod Millman + +""" +from . import core +from .core import * + +from . import extras +from .extras import * + +__all__ = ['core', 'extras'] +__all__ += core.__all__ +__all__ += extras.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/phivenv/Lib/site-packages/numpy/ma/__init__.pyi b/phivenv/Lib/site-packages/numpy/ma/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..14d54e140d92e1e0cd3c0d8aaaabe15d02bb293e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/__init__.pyi @@ -0,0 +1,233 @@ +from numpy._pytesttester import PytestTester + +from numpy.ma import extras as extras + +from numpy.ma.core import ( + MAError as MAError, + MaskError as MaskError, + MaskType as MaskType, + MaskedArray as MaskedArray, + abs as abs, + absolute as absolute, + add as add, + all as all, + allclose as allclose, + allequal as allequal, + alltrue as alltrue, + amax as amax, + amin as amin, + angle as angle, + anom as anom, + anomalies as anomalies, + any as any, + append as append, + arange as arange, + arccos as arccos, + arccosh as arccosh, + arcsin as arcsin, + arcsinh as arcsinh, + arctan as arctan, + arctan2 as arctan2, + arctanh as arctanh, + argmax as argmax, + argmin as argmin, + argsort as argsort, + around as around, + array as array, + asanyarray as asanyarray, + asarray as asarray, + bitwise_and as bitwise_and, + bitwise_or as bitwise_or, + bitwise_xor as bitwise_xor, + bool as bool, + ceil as ceil, + choose as choose, + clip as clip, + common_fill_value as common_fill_value, + compress as compress, + compressed as compressed, + concatenate as concatenate, + conjugate as conjugate, + convolve as convolve, + copy as copy, + correlate as correlate, + cos as cos, + cosh as cosh, + count as count, + cumprod as cumprod, + cumsum as cumsum, + default_fill_value as default_fill_value, + diag as diag, + diagonal as diagonal, + diff as diff, + divide as divide, + empty as empty, + empty_like as empty_like, + equal as equal, + exp as exp, + expand_dims as expand_dims, + fabs as fabs, +
filled as filled, + fix_invalid as fix_invalid, + flatten_mask as flatten_mask, + flatten_structured_array as flatten_structured_array, + floor as floor, + floor_divide as floor_divide, + fmod as fmod, + frombuffer as frombuffer, + fromflex as fromflex, + fromfunction as fromfunction, + getdata as getdata, + getmask as getmask, + getmaskarray as getmaskarray, + greater as greater, + greater_equal as greater_equal, + harden_mask as harden_mask, + hypot as hypot, + identity as identity, + ids as ids, + indices as indices, + inner as inner, + innerproduct as innerproduct, + isMA as isMA, + isMaskedArray as isMaskedArray, + is_mask as is_mask, + is_masked as is_masked, + isarray as isarray, + left_shift as left_shift, + less as less, + less_equal as less_equal, + log as log, + log10 as log10, + log2 as log2, + logical_and as logical_and, + logical_not as logical_not, + logical_or as logical_or, + logical_xor as logical_xor, + make_mask as make_mask, + make_mask_descr as make_mask_descr, + make_mask_none as make_mask_none, + mask_or as mask_or, + masked as masked, + masked_array as masked_array, + masked_equal as masked_equal, + masked_greater as masked_greater, + masked_greater_equal as masked_greater_equal, + masked_inside as masked_inside, + masked_invalid as masked_invalid, + masked_less as masked_less, + masked_less_equal as masked_less_equal, + masked_not_equal as masked_not_equal, + masked_object as masked_object, + masked_outside as masked_outside, + masked_print_option as masked_print_option, + masked_singleton as masked_singleton, + masked_values as masked_values, + masked_where as masked_where, + max as max, + maximum as maximum, + maximum_fill_value as maximum_fill_value, + mean as mean, + min as min, + minimum as minimum, + minimum_fill_value as minimum_fill_value, + mod as mod, + multiply as multiply, + mvoid as mvoid, + ndim as ndim, + negative as negative, + nomask as nomask, + nonzero as nonzero, + not_equal as not_equal, + ones as ones, + outer as outer, + outerproduct as outerproduct, + power as power, + prod as prod, + product as product, + ptp as ptp, + put as put, + putmask as putmask, + ravel as ravel, + remainder as remainder, + repeat as repeat, + reshape as reshape, + resize as resize, + right_shift as right_shift, + round as round, + set_fill_value as set_fill_value, + shape as shape, + sin as sin, + sinh as sinh, + size as size, + soften_mask as soften_mask, + sometrue as sometrue, + sort as sort, + sqrt as sqrt, + squeeze as squeeze, + std as std, + subtract as subtract, + sum as sum, + swapaxes as swapaxes, + take as take, + tan as tan, + tanh as tanh, + trace as trace, + transpose as transpose, + true_divide as true_divide, + var as var, + where as where, + zeros as zeros, +) + +from numpy.ma.extras import ( + apply_along_axis as apply_along_axis, + apply_over_axes as apply_over_axes, + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + average as average, + clump_masked as clump_masked, + clump_unmasked as clump_unmasked, + column_stack as column_stack, + compress_cols as compress_cols, + compress_nd as compress_nd, + compress_rowcols as compress_rowcols, + compress_rows as compress_rows, + count_masked as count_masked, + corrcoef as corrcoef, + cov as cov, + diagflat as diagflat, + dot as dot, + dstack as dstack, + ediff1d as ediff1d, + flatnotmasked_contiguous as flatnotmasked_contiguous, + flatnotmasked_edges as flatnotmasked_edges, + hsplit as hsplit, + hstack as hstack, + isin as isin, + in1d as in1d, + intersect1d as 
intersect1d, + mask_cols as mask_cols, + mask_rowcols as mask_rowcols, + mask_rows as mask_rows, + masked_all as masked_all, + masked_all_like as masked_all_like, + median as median, + mr_ as mr_, + ndenumerate as ndenumerate, + notmasked_contiguous as notmasked_contiguous, + notmasked_edges as notmasked_edges, + polyfit as polyfit, + row_stack as row_stack, + setdiff1d as setdiff1d, + setxor1d as setxor1d, + stack as stack, + unique as unique, + union1d as union1d, + vander as vander, + vstack as vstack, +) + +__all__: list[str] +test: PytestTester diff --git a/phivenv/Lib/site-packages/numpy/ma/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5a283ffeae0e1897658873b804b4c253e232f68 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/__pycache__/extras.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/__pycache__/extras.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fff131f3c70b5027488cb2a215a7509865b67e2e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/__pycache__/extras.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/__pycache__/mrecords.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/__pycache__/mrecords.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df3eee8bb57b1687cf7e4f141850c429e9bfe50f Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/__pycache__/mrecords.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/__pycache__/testutils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/__pycache__/testutils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1468b380864fc529c552eb888de23e6824d1959 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/__pycache__/testutils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/__pycache__/timer_comparison.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/__pycache__/timer_comparison.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fda061e9c0806278a0e0a5a5c15acccf3230c93 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/__pycache__/timer_comparison.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/core.py b/phivenv/Lib/site-packages/numpy/ma/core.py new file mode 100644 index 0000000000000000000000000000000000000000..0ccf5252ee998d94ee949468f9f7869b6529ccbb --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/core.py @@ -0,0 +1,8703 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. 
moduleauthor:: Pierre Gerard-Marchant + +""" +# pylint: disable-msg=E1002 +import builtins +import inspect +import operator +import warnings +import textwrap +import re +from functools import reduce +from typing import Dict + +import numpy as np +import numpy._core.umath as umath +import numpy._core.numerictypes as ntypes +from numpy._core import multiarray as mu +from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue, angle +from numpy import array as narray, expand_dims, iinfo, finfo +from numpy._core.numeric import normalize_axis_tuple +from numpy._utils._inspect import getargspec, formatargspec +from numpy._utils import set_module + + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', + 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', + 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', 'zeros_like', + ] + +MaskType = np.bool +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibility. + Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. + """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. 
+ return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) + notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note) + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +def get_object_signature(obj): + """ + Get the signature from obj + + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError: + sig = '' + return sig + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. + + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': 'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] + +_minvals: Dict[type, int] = {} +_maxvals: Dict[type, int] = {} + +for sctype in ntypes.sctypeDict.values(): + scalar_dtype = np.dtype(sctype) + + if scalar_dtype.kind in "Mm": + info = np.iinfo(np.int64) + min_val, max_val = info.min, info.max + elif np.issubdtype(scalar_dtype, np.integer): + info = np.iinfo(sctype) + min_val, max_val = info.min, info.max + elif np.issubdtype(scalar_dtype, np.floating): + info = np.finfo(sctype) + min_val, max_val = info.min, info.max + elif scalar_dtype.kind == "b": + min_val, max_val = 0, 1 + else: + min_val, max_val = None, None + + _minvals[sctype] = min_val + _maxvals[sctype] = max_val + +max_filler = _minvals +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + +min_filler = _maxvals +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) + +del float_types_list + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + # We wrap into `array` here, which ensures we use NumPy cast rules + # for integer casts, this allows the use of 99999 as a fill value + # for int8. + # TODO: This is probably a mess, but should best preserve behavior? 
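        # Added note (not in the original source): for a structured dtype
        # such as [('a', '<i8'), ('b', '<f8', (2,))], the recursion below
        # collects one fill value per field, so the default rule yields the
        # void scalar (999999, [1.e+20, 1.e+20]).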
+ vals = tuple( + np.array(_recursive_fill_value(dtype[name], f)) + for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. + + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. + + Examples + -------- + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype.type] + except KeyError as e: + raise TypeError( + f"Unsuitable type {dtype} for calculating {extremum_name}." + ) from None + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for its numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype.
+ + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for its numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue : scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt : dtype + The structured dtype for which to create the fill value. + + Returns + ------- + val : tuple + A tuple of values corresponding to the structured fill value. + + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except ValueError as e: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) from e + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + else: + if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array.
+ + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = list(range(5)) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + return + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an array with masked data replaced by a fill value. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : array_like, optional. + Can be scalar or non-scalar. If non-scalar, the + resulting filled array should be broadcastable + over input array. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=333) + array([[333, 1, 2], + [333, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=np.arange(3)) + array([[0, 1, 2], + [0, 4, 5], + [6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + + elif isinstance(a, ndarray): + # Should we check for contiguity ? 
and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. + + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=None, subok=subok) + if not subok: + return data.view(ndarray) + return data + + +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. 
+ + Examples + -------- + >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) + >>> x + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) + >>> np.ma.fix_invalid(x) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) + + >>> fixed = np.ma.fix_invalid(x) + >>> fixed.data + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) + >>> x.data + array([ 1., -1., nan, inf]) + + """ + a = masked_array(a, copy=copy, mask=mask, subok=True) + invalid = np.logical_not(np.isfinite(a._data)) + if not invalid.any(): + return a + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + +def is_string_or_list_of_strings(val): + return (isinstance(val, str) or + (isinstance(val, list) and val and + builtins.all(isinstance(s, str) for s in val))) + +############################################################################### +# Ufuncs # +############################################################################### + + +ufunc_domain = {} +ufunc_fills = {} + + +class _DomainCheckInterval: + """ + Define a valid interval, so that: + + ``domain_check_interval(a,b)(x) == True`` where + ``x < a`` or ``x > b``. + + """ + + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or x > b" + if a > b: + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__(self, x): + "Execute the call behavior." + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(invalid='ignore'): + return umath.logical_or(umath.greater(x, self.b), + umath.less(x, self.a)) + + +class _DomainTan: + """ + Define a valid interval for the `tan` function, so that: + + ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` + + """ + + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps" + self.eps = eps + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(umath.absolute(umath.cos(x)), self.eps) + + +class _DomainSafeDivide: + """ + Define a domain for safe division. + + """ + + def __init__(self, tolerance=None): + self.tolerance = tolerance + + def __call__(self, a, b): + # Delay the selection of the tolerance to here in order to reduce numpy + # import times. The calculation of these parameters is a substantial + # component of numpy's import time. + if self.tolerance is None: + self.tolerance = np.finfo(float).tiny + # don't call ma ufuncs from __array_wrap__ which would fail for scalars + a, b = np.asarray(a), np.asarray(b) + with np.errstate(invalid='ignore'): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) + + +class _DomainGreater: + """ + DomainGreater(v)(x) is True where x <= v. + + """ + + def __init__(self, critical_value): + "DomainGreater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less_equal(x, self.critical_value) + + +class _DomainGreaterEqual: + """ + DomainGreaterEqual(v)(x) is True where x < v. + + """ + + def __init__(self, critical_value): + "DomainGreaterEqual(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior."
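        # Added note (not in the original source): _DomainGreaterEqual(0.0)
        # is the domain attached to ma.sqrt further below, so
        # ma.sqrt(ma.array([-1., 4.])) masks the negative entry instead of
        # propagating a nan.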
+ with np.errstate(invalid='ignore'): + return umath.less(x, self.critical_value) + + +class _MaskedUFunc: + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + + def __str__(self): + return f"Masked version of {self.f}" + + +class _MaskedUnaryOperation(_MaskedUFunc): + """ + Defines masked version of unary operations, where invalid values are + pre-masked. + + Parameters + ---------- + mufunc : callable + The function for which to define a masked version. Made available + as ``_MaskedUnaryOperation.f``. + fill : scalar, optional + Filling value, default is 0. + domain : class instance + Domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + + """ + + def __init__(self, mufunc, fill=0, domain=None): + super().__init__(mufunc) + self.fill = fill + self.domain = domain + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + + def __call__(self, a, *args, **kwargs): + """ + Execute the call behavior. + + """ + d = getdata(a) + # Deal with domain + if self.domain is not None: + # Case 1.1. : Domained function + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + else: + # Case 1.2. : Function without a domain + # Get the result and the mask + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + m = getmask(a) + + if not result.ndim: + # Case 2.1. : The result is scalar + if m: + return masked + return result + + if m is not nomask: + # Case 2.2. The result is an array + # We need to fill the invalid data back w/ the input. Now, + # that's plain silly: in C, we would just skip the element and + # keep the original, but we do have to do it that way in Python + + # In case result has a lower dtype than the inputs (as in + # equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a)) + masked_result._mask = m + masked_result._update_from(a) + return masked_result + + +class _MaskedBinaryOperation(_MaskedUFunc): + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, mbfunc, fillx=0, filly=0): + """ + abfunc(fillx, filly) must be defined. + + abfunc(x, filly) = x for all x to enable reduce. + + """ + super().__init__(mbfunc) + self.fillx = fillx + self.filly = filly + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + """ + Execute the call behavior.
+ + """ + # Get the data, as ndarray + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(): + np.seterr(divide='ignore', invalid='ignore') + result = self.f(da, db, *args, **kwargs) + # Get the mask for the result + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + + # Case 2. : array + # Revert result to da where masked + if m is not nomask and m.any(): + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, da, casting='unsafe', where=m) + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + def reduce(self, target, axis=0, dtype=None): + """ + Reduce `target` along the given `axis`. + + """ + tclass = get_masked_subclass(target) + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=True) + m.shape = (1,) + + if m is nomask: + tr = self.f.reduce(t, axis) + mr = nomask + else: + tr = self.f.reduce(t, axis, dtype=dtype) + mr = umath.logical_and.reduce(m, axis) + + if not tr.shape: + if mr: + return masked + else: + return tr + masked_tr = tr.view(tclass) + masked_tr._mask = mr + return masked_tr + + def outer(self, a, b): + """ + Return the function applied to the outer product of a and b. + + """ + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + if m is not nomask: + np.copyto(d, da, where=m) + if not d.shape: + return d + masked_d = d.view(get_masked_subclass(a, b)) + masked_d._mask = m + return masked_d + + def accumulate(self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + tclass = get_masked_subclass(target) + t = filled(target, self.filly) + result = self.f.accumulate(t, axis) + masked_result = result.view(tclass) + return masked_result + + + +class _DomainedBinaryOperation(_MaskedUFunc): + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + super().__init__(dbfunc) + self.domain = domain + self.fillx = fillx + self.filly = filly + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." 
+ # Get the data + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # Get the mask as a combination of the source masks and invalid + m = ~umath.isfinite(result) + m |= getmask(a) + m |= getmask(b) + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= domain(da, db) + # Take care of the scalar case first + if not m.ndim: + if m: + return masked + else: + return result + # When the mask is True, put back da if possible + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, 0, casting='unsafe', where=m) + # avoid using "*" since this may be overlaid + masked_da = umath.multiply(m, da) + # only add back if it can be cast safely + if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): + result += masked_da + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) +fabs = _MaskedUnaryOperation(umath.fabs) +negative = _MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.around) +logical_not = _MaskedUnaryOperation(umath.logical_not) + +# Domained unary ufuncs +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) + +# Binary ufuncs +add = _MaskedBinaryOperation(umath.add) +subtract = _MaskedBinaryOperation(umath.subtract) +multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) +arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) +equal = _MaskedBinaryOperation(umath.equal) +equal.reduce = None +not_equal = _MaskedBinaryOperation(umath.not_equal) +not_equal.reduce = None +less_equal = _MaskedBinaryOperation(umath.less_equal) +less_equal.reduce = None +greater_equal = _MaskedBinaryOperation(umath.greater_equal) +greater_equal.reduce = None +less = _MaskedBinaryOperation(umath.less) +less.reduce = None +greater = _MaskedBinaryOperation(umath.greater) +greater.reduce = None +logical_and = _MaskedBinaryOperation(umath.logical_and) +alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce +logical_or = 
_MaskedBinaryOperation(umath.logical_or) +sometrue = logical_or.reduce +logical_xor = _MaskedBinaryOperation(umath.logical_xor) +bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) +bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) +bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) +hypot = _MaskedBinaryOperation(umath.hypot) + +# Domained binary ufuncs +divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) +true_divide = _DomainedBinaryOperation(umath.true_divide, + _DomainSafeDivide(), 0, 1) +floor_divide = _DomainedBinaryOperation(umath.floor_divide, + _DomainSafeDivide(), 0, 1) +remainder = _DomainedBinaryOperation(umath.remainder, + _DomainSafeDivide(), 0, 1) +fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) +mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) + + +############################################################################### +# Mask creation functions # +############################################################################### + + +def _replace_dtype_fields_recursive(dtype, primitive_dtype): + "Private function allowing recursion in _replace_dtype_fields." + _recurse = _replace_dtype_fields_recursive + + # Do we have some name fields ? + if dtype.names is not None: + descr = [] + for name in dtype.names: + field = dtype.fields[name] + if len(field) == 3: + # Prepend the title to the name + name = (field[-1], name) + descr.append((name, _recurse(field[0], primitive_dtype))) + new_dtype = np.dtype(descr) + + # Is this some kind of composite a la (float,2) + elif dtype.subdtype: + descr = list(dtype.subdtype) + descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) + new_dtype = np.dtype(tuple(descr)) + + # this is a primitive type, so do a direct replacement + else: + new_dtype = primitive_dtype + + # preserve identity of dtypes + if new_dtype == dtype: + new_dtype = dtype + + return new_dtype + + +def _replace_dtype_fields(dtype, primitive_dtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with all fields and subtypes in the given type + recursively replaced with `primitive_dtype`. + + Arguments are coerced to dtypes first. + """ + dtype = np.dtype(dtype) + primitive_dtype = np.dtype(primitive_dtype) + return _replace_dtype_fields_recursive(dtype, primitive_dtype) + + +def make_mask_descr(ndtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with the type of all fields in `ndtype` to a + boolean type. Field names are not altered. + + Parameters + ---------- + ndtype : dtype + The dtype to convert. + + Returns + ------- + result : dtype + A dtype that looks like `ndtype`, the type of all fields is boolean. + + Examples + -------- + >>> import numpy.ma as ma + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '<f4'), ('bar', '<i8')]) + >>> ma.make_mask_descr(dtype) + dtype([('foo', '|b1'), ('bar', '|b1')]) + >>> ma.make_mask_descr(np.float32) + dtype('bool') + + """ + return _replace_dtype_fields(ndtype, MaskType) + + +def getmask(a): + """ + Return the mask of a masked array, or nomask. + + Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the + mask is not `nomask`, else return `nomask`. To guarantee a full array + of booleans of the same shape as a, use `getmaskarray`. + + Parameters + ---------- + a : array_like + Input `MaskedArray` for which the mask is required.
+ + See Also + -------- + getdata : Return the data of a masked array as an ndarray. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmask(a) + array([[False, True], + [False, False]]) + + Equivalently use the `MaskedArray` `mask` attribute. + + >>> a.mask + array([[False, True], + [False, False]]) + + Result when mask == `nomask` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.nomask + False + >>> ma.getmask(b) == ma.nomask + True + >>> b.mask == ma.nomask + True + + """ + return getattr(a, '_mask', nomask) + + +get_mask = getmask + + +def getmaskarray(arr): + """ + Return the mask of a masked array, or full boolean array of False. + + Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and + the mask is not `nomask`, else return a full boolean array of False of + the same shape as `arr`. + + Parameters + ---------- + arr : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getdata : Return the data of a masked array as an ndarray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmaskarray(a) + array([[False, True], + [False, False]]) + + Result when mask == ``nomask`` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) + array([[False, False], + [False, False]]) + + """ + mask = getmask(arr) + if mask is nomask: + mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) + return mask + + +def is_mask(m): + """ + Return True if m is a valid, standard mask. + + This function does not check the contents of the input, only that the + type is MaskType. In particular, this function returns False if the + mask has a flexible dtype. + + Parameters + ---------- + m : array_like + Array to test. + + Returns + ------- + result : bool + True if `m.dtype.type` is MaskType, False otherwise. + + See Also + -------- + ma.isMaskedArray : Test whether input is an instance of MaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> m + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_mask(m) + False + >>> ma.is_mask(m.mask) + True + + Input must be an ndarray (or have similar attributes) + for it to be considered a valid mask. + + >>> m = [False, True, False] + >>> ma.is_mask(m) + False + >>> m = np.array([False, True, False]) + >>> m + array([False, True, False]) + >>> ma.is_mask(m) + True + + Arrays with complex dtypes don't return True. + + >>> dtype = np.dtype({'names':['monty', 'pithon'], + ... 'formats':[bool, bool]}) + >>> dtype + dtype([('monty', '|b1'), ('pithon', '|b1')]) + >>> m = np.array([(True, False), (False, True), (True, False)], + ... 
dtype=dtype) + >>> m + array([( True, False), (False, True), ( True, False)], + dtype=[('monty', '?'), ('pithon', '?')]) + >>> ma.is_mask(m) + False + + """ + try: + return m.dtype.type is MaskType + except AttributeError: + return False + + +def _shrink_mask(m): + """ + Shrink a mask to nomask if possible + """ + if m.dtype.names is None and not m.any(): + return nomask + else: + return m + + +def make_mask(m, copy=False, shrink=True, dtype=MaskType): + """ + Create a boolean mask from an array. + + Return `m` as a boolean mask, creating a copy if necessary or requested. + The function can accept any sequence that is convertible to integers, + or ``nomask``. The contents need not be 0s and 1s: values of 0 are + interpreted as False, everything else as True. + + Parameters + ---------- + m : array_like + Potential mask. + copy : bool, optional + Whether to return a copy of `m` (True) or `m` itself (False). + shrink : bool, optional + Whether to shrink `m` to ``nomask`` if all its values are False. + dtype : dtype, optional + Data-type of the output mask. By default, the output mask has a + dtype of MaskType (bool). If the dtype is flexible, each field has + a boolean dtype. This is ignored when `m` is ``nomask``, in which + case ``nomask`` is always returned. + + Returns + ------- + result : ndarray + A boolean mask derived from `m`. + + Examples + -------- + >>> import numpy.ma as ma + >>> m = [True, False, True, True] + >>> ma.make_mask(m) + array([ True, False, True, True]) + >>> m = [1, 0, 1, 1] + >>> ma.make_mask(m) + array([ True, False, True, True]) + >>> m = [1, 0, 2, -3] + >>> ma.make_mask(m) + array([ True, False, True, True]) + + Effect of the `shrink` parameter. + + >>> m = np.zeros(4) + >>> m + array([0., 0., 0., 0.]) + >>> ma.make_mask(m) + False + >>> ma.make_mask(m, shrink=False) + array([False, False, False, False]) + + Using a flexible `dtype`. + + >>> m = [1, 0, 1, 1] + >>> n = [0, 1, 0, 0] + >>> arr = [] + >>> for man, mouse in zip(m, n): + ... arr.append((man, mouse)) + >>> arr + [(1, 0), (0, 1), (1, 0), (1, 0)] + >>> dtype = np.dtype({'names':['man', 'mouse'], + ... 'formats':[np.int64, np.int64]}) + >>> arr = np.array(arr, dtype=dtype) + >>> arr + array([(1, 0), (0, 1), (1, 0), (1, 0)], + dtype=[('man', '<i8'), ('mouse', '<i8')]) + >>> ma.make_mask(arr, dtype=dtype) + array([(True, False), (False, True), (True, False), (True, False)], + dtype=[('man', '|b1'), ('mouse', '|b1')]) + + """ + if m is nomask: + return nomask + + # Make sure the input dtype is valid. + dtype = make_mask_descr(dtype) + + # legacy boolean special case: "existence of fields implies true" + if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool: + return np.ones(m.shape, dtype=dtype) + + # Fill the mask in case there are missing data; turn it into an ndarray. + copy = None if not copy else True + result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True) + # Down with the masks! + if shrink: + result = _shrink_mask(result) + return result + + +def make_mask_none(newshape, dtype=None): + """ + Return a boolean mask of the given shape, filled with False. + + This function returns a boolean ndarray with all entries False, that can + be used in common mask manipulations. If a complex dtype is specified, the + type of each field is converted to a boolean type. + + Parameters + ---------- + newshape : tuple + A tuple indicating the shape of the mask. + dtype : {None, dtype}, optional + If None, use a MaskType instance.
Otherwise, use a new datatype with + the same fields as `dtype`, converted to boolean types. + + Returns + ------- + result : ndarray + An ndarray of appropriate shape and dtype, filled with False. + + See Also + -------- + make_mask : Create a boolean mask from an array. + make_mask_descr : Construct a dtype description list from a given dtype. + + Examples + -------- + >>> import numpy.ma as ma + >>> ma.make_mask_none((3,)) + array([False, False, False]) + + Defining a more complex dtype. + + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '<f4'), ('bar', '<i8')]) + >>> ma.make_mask_none((3,), dtype=dtype) + array([(False, False), (False, False), (False, False)], + dtype=[('foo', '|b1'), ('bar', '|b1')]) + + """ + if dtype is None: + result = np.zeros(newshape, dtype=MaskType) + else: + result = np.zeros(newshape, dtype=make_mask_descr(dtype)) + return result + + +def _recursive_mask_or(m1, m2, newmask): + names = m1.dtype.names + for name in names: + current1 = m1[name] + if current1.dtype.names is not None: + _recursive_mask_or(current1, m2[name], newmask[name]) + else: + umath.logical_or(current1, m2[name], newmask[name]) + + +def mask_or(m1, m2, copy=False, shrink=True): + """ + Combine two masks with the ``logical_or`` operator. + + The result may be a view on `m1` or `m2` if the other is `nomask` + (i.e. False). + + Parameters + ---------- + m1, m2 : array_like + Input masks. + copy : bool, optional + If copy is False and one of the inputs is `nomask`, return a view + of the other input mask. Defaults to False. + shrink : bool, optional + Whether to shrink the output to `nomask` if all its values are + False. Defaults to True. + + Returns + ------- + mask : output mask + The result masks values that are masked in either `m1` or `m2`. + + Raises + ------ + ValueError + If `m1` and `m2` have different flexible dtypes. + + Examples + -------- + >>> m1 = np.ma.make_mask([0, 1, 1, 0]) + >>> m2 = np.ma.make_mask([1, 0, 0, 0]) + >>> np.ma.mask_or(m1, m2) + array([ True, True, True, False]) + + """ + + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) + if m1 is m2 and is_mask(m1): + return m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if dtype1 != dtype2: + raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) + if dtype1.names is not None: + # Allocate an output mask array with the properly broadcast shape. + newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) + _recursive_mask_or(m1, m2, newmask) + return newmask + return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) + + +def flatten_mask(mask): + """ + Returns a completely flattened version of the mask, where nested fields + are collapsed. + + Parameters + ---------- + mask : array_like + Input array, which will be interpreted as booleans. + + Returns + ------- + flattened_mask : ndarray of bools + The flattened input.
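+ + The output is always 1-D, whatever the nesting of the input's fields; a minimal illustrative sketch of that invariant (an addition, assuming ``np`` is numpy): + + >>> np.ma.flatten_mask(np.zeros(2, dtype=[('a', bool), ('b', bool)])).shape + (4,)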
+ + Examples + -------- + >>> mask = np.array([0, 0, 1]) + >>> np.ma.flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> np.ma.flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> np.ma.flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + yield from _flatsequence(element) + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array([_ for _ in flattened], dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. + + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where *not* equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + Mask array `b` conditional on `a`. 
+ + >>> b = ['a', 'b', 'c', 'd'] + >>> ma.masked_where(a == 2, b) + masked_array(data=['a', 'b', --, 'd'], + mask=[False, False, True, False], + fill_value='N/A', + dtype='<U1') + + Effect of the `copy` argument. + + >>> c = ma.masked_where(a <= 2, a) + >>> c + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + >>> c[0] = 99 + >>> c + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) + >>> a + array([0, 1, 2, 3]) + >>> c = ma.masked_where(a <= 2, a, copy=False) + >>> c[0] = 99 + >>> c + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) + >>> a + array([99, 1, 2, 3]) + + When `condition` or `a` contain masked values. + + >>> a = np.arange(4) + >>> a = ma.masked_where(a == 2, a) + >>> a + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=999999) + >>> b = np.arange(4) + >>> b = ma.masked_where(b == 0, b) + >>> b + masked_array(data=[--, 1, 2, 3], + mask=[ True, False, False, False], + fill_value=999999) + >>> ma.masked_where(a == 3, b) + masked_array(data=[--, 1, --, --], + mask=[ True, False, True, True], + fill_value=999999) + + """ + # Make sure that condition is a valid standard-type mask. + cond = make_mask(condition, shrink=False) + a = np.array(a, copy=copy, subok=True) + + (cshape, ashape) = (cond.shape, a.shape) + if cshape and cshape != ashape: + raise IndexError("Inconsistent shape between the condition and the input" + " (got %s and %s)" % (cshape, ashape)) + if hasattr(a, '_mask'): + cond = mask_or(cond, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + # Assign to *.mask so that structured masks are handled correctly. + result.mask = _shrink_mask(cond) + # There is no view of a boolean so when 'a' is a MaskedArray with nomask + # the update to the result's mask has no effect. + if not copy and hasattr(a, '_mask') and getmask(a) is nomask: + a._mask = result._mask.view() + return result + + +def masked_greater(x, value, copy=True): + """ + Mask an array where greater than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x > value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater(a, 2) + masked_array(data=[0, 1, 2, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + return masked_where(greater(x, value), x, copy=copy) + + +def masked_greater_equal(x, value, copy=True): + """ + Mask an array where greater than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x >= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater_equal(a, 2) + masked_array(data=[0, 1, --, --], + mask=[False, False, True, True], + fill_value=999999) + + """ + return masked_where(greater_equal(x, value), x, copy=copy) + + +def masked_less(x, value, copy=True): + """ + Mask an array where less than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x < value). + + See Also + -------- + masked_where : Mask where a condition is met.
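+ + As an illustrative sketch of that shortcut (an addition): the mask agrees elementwise with ``x < value``. + + >>> a = np.arange(4) + >>> bool((np.ma.masked_less(a, 2).mask == (a < 2)).all()) + True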
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where *not* equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + Return a MaskedArray, masked where the data in array `x` are + equal to `value`. The fill_value of the returned MaskedArray + is set to `value`. + + For floating point arrays, consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). + The boundaries `v1` and `v2` can be given in either order. 
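+ + Together with ``masked_inside``, this partitions the values: each element is masked by exactly one of the two calls (a minimal illustrative sketch, an addition here): + + >>> x = np.array([-2.0, 0.0, 2.0]) + >>> inside = np.ma.masked_inside(x, -1, 1).mask + >>> outside = np.ma.masked_outside(x, -1, 1).mask + >>> bool(np.all(inside ^ outside)) + True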
+ + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + Note that `mask` is set to ``nomask`` if possible. + + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. + + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. 
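+ + A minimal illustrative sketch of the tolerance semantics (an addition): a value within ``atol`` of `value` is masked, a value outside both tolerances is not. + + >>> np.ma.masked_values([1.0, 1.0 + 5e-9, 1.1], 1.0).mask.tolist() + [True, True, False]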
+ + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 2.1) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=2.1) + + Unlike `masked_equal`, `masked_values` can perform approximate equalities. + + >>> ma.masked_values(x, 2.1, atol=1e-1) + masked_array(data=[1.0, 1.1, --, 1.1, 3.0], + mask=[False, False, True, False, False], + fill_value=2.1) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.nan + >>> a[3] = np.inf + >>> a + array([ 0., 1., nan, inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) + + """ + a = np.array(a, copy=None, subok=True) + res = masked_where(~(np.isfinite(a)), a, copy=copy) + # masked_invalid previously never returned nomask as a mask and doing so + # threw off matplotlib (gh-22842). So use shrink=False: + if res._mask is nomask: + res._mask = make_mask_none(res.shape, res.dtype) + return res + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. + + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. 
+ + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + return + +# For better or worse, these end in a newline +_legacy_print_templates = dict( + long_std=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + long_flx=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + short_std=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + short_flx=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +) + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> np.ma.flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. + + """ + for elm in iter(iterable): + if hasattr(elm, '__iter__'): + yield from flatten_sequence(elm) + else: + yield elm + + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + +def _arraymethod(funcname, onmask=True): + """ + Return a class method wrapper around a basic array method. + + Creates a class method which returns a masked array, where the new + ``_data`` array is the output of the corresponding basic method called + on the original ``_data``. + + If `onmask` is True, the new mask is the output of the method called + on the initial mask. Otherwise, the new mask is just a reference + to the initial mask. + + Parameters + ---------- + funcname : str + Name of the function to apply on data. + onmask : bool + Whether the mask must be processed also (True) or left + alone (False). Default is True. Make available as `_onmask` + attribute. 
+ + Returns + ------- + method : instancemethod + Class method wrapper of the specified basic array method. + + """ + def wrapped_method(self, *args, **params): + result = getattr(self._data, funcname)(*args, **params) + result = result.view(type(self)) + result._update_from(self) + mask = self._mask + if not onmask: + result.__setmask__(mask) + elif mask is not nomask: + # __setmask__ makes a copy, which we don't want + result._mask = getattr(mask, funcname)(*args, **params) + return result + methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) + if methdoc is not None: + wrapped_method.__doc__ = methdoc.__doc__ + wrapped_method.__name__ = funcname + return wrapped_method + + +class MaskedIterator: + """ + Flat iterator object to iterate over masked arrays. + + A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array + `x`. It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in C-contiguous style, with the last index varying the + fastest. The iterator can also be indexed using basic slicing or + advanced indexing. + + See Also + -------- + MaskedArray.flat : Return a flat iterator over an array. + MaskedArray.flatten : Returns a flattened copy of an array. + + Notes + ----- + `MaskedIterator` is not exported by the `ma` module. Instead of + instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. + + Examples + -------- + >>> x = np.ma.array(np.arange(6).reshape(2, 3)) + >>> fl = x.flat + >>> type(fl) + <class 'numpy.ma.core.MaskedIterator'> + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + Extracting more than a single element by indexing the `MaskedIterator` + returns a masked array: + + >>> fl[2:4] + masked_array(data = [2 3], + mask = False, + fill_value = 999999) + + """ + + def __init__(self, ma): + self.ma = ma + self.dataiter = ma._data.flat + + if ma._mask is nomask: + self.maskiter = None + else: + self.maskiter = ma._mask.flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + if isinstance(_mask, ndarray): + # set shape to match that of data; this is needed for matrices + _mask.shape = result.shape + result._mask = _mask + elif isinstance(_mask, np.void): + return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) + elif _mask: # Just a scalar, masked + return masked + return result + + # This won't work if ravel makes a copy + def __setitem__(self, index, value): + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) + + def __next__(self): + """ + Return the next value, or raise StopIteration. + + Examples + -------- + >>> x = np.ma.array([3, 2], mask=[0, 1]) + >>> fl = x.flat + >>> next(fl) + 3 + >>> next(fl) + masked + >>> next(fl) + Traceback (most recent call last): + ... + StopIteration + + """ + d = next(self.dataiter) + if self.maskiter is not None: + m = next(self.maskiter) + if isinstance(m, np.void): + return mvoid(d, mask=m, hardmask=self.ma._hardmask) + elif m: # Just a scalar, masked + return masked + return d + + +@set_module("numpy.ma") +class MaskedArray(ndarray): + """ + An array class with possibly masked values. + + Masked values of True exclude the corresponding element from any + computation.
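+ + For instance (an illustrative sketch, an addition here; construction details follow below), a masked element propagates through arithmetic: + + >>> (np.ma.array([1, 2], mask=[False, True]) + 1).mask.tolist() + [False, True]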
+ + Construction:: + + x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, + ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, + shrink=True, order=None) + + Parameters + ---------- + data : array_like + Input data. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + dtype : dtype, optional + Data type of the output. + If `dtype` is None, the type of the data argument (``data.dtype``) + is used. If `dtype` is not None and different from ``data.dtype``, + a copy is performed. + copy : bool, optional + Whether to copy the input data (True), or to use a reference instead. + Default is False. + subok : bool, optional + Whether to return a subclass of `MaskedArray` if possible (True) or a + plain `MaskedArray`. Default is True. + ndmin : int, optional + Minimum number of dimensions. Default is 0. + fill_value : scalar, optional + Value used to fill in the masked values when necessary. + If None, a default based on the data-type is used. + keep_mask : bool, optional + Whether to combine `mask` with the mask of the input data, if any + (True), or to use only `mask` for the output (False). Default is True. + hard_mask : bool, optional + Whether to use a hard mask or not. With a hard mask, masked values + cannot be unmasked. Default is False. + shrink : bool, optional + Whether to force compression of an empty mask. Default is True. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + Examples + -------- + + The ``mask`` can be initialized with an array of boolean values + with the same shape as ``data``. + + >>> data = np.arange(6).reshape((2, 3)) + >>> np.ma.MaskedArray(data, mask=[[False, True, False], + ... [False, False, True]]) + masked_array( + data=[[0, --, 2], + [3, 4, --]], + mask=[[False, True, False], + [False, False, True]], + fill_value=999999) + + Alternatively, the ``mask`` can be initialized to homogeneous boolean + array with the same shape as ``data`` by passing in a scalar + boolean value: + + >>> np.ma.MaskedArray(data, mask=False) + masked_array( + data=[[0, 1, 2], + [3, 4, 5]], + mask=[[False, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.MaskedArray(data, mask=True) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=999999, + dtype=int64) + + .. note:: + The recommended practice for initializing ``mask`` with a scalar + boolean value is to use ``True``/``False`` rather than + ``np.True_``/``np.False_``. The reason is :attr:`nomask` + is represented internally as ``np.False_``. + + >>> np.False_ is np.ma.nomask + True + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. 
+ _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + copy = None if not copy else True + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + + # Handle the case where data is not a subclass of ndarray, but + # still has the _mask attribute like MaskedArrays + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME: should we set `_data._sharedmask = True`? + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) + except (ValueError, TypeError): + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + # gh-21022 encounters an issue here + # because data._mask.shape is not writeable, but + # the op was also pointless in that case, because + # the shapes were the same, so we can at least + # avoid that path + if data._mask.shape != data.shape: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. + # If mask is boolean, create an array of True or False + + # if users pass `mask=None` be forgiving here and cast it False + # for speed; although the default is `mask=nomask` and can differ. + if mask is None: + mask = False + + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." 
+ raise MaskError(msg % (nd, nm)) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + else: + if not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. + + """ + if isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = dict(_fill_value=getattr(obj, '_fill_value', None), + _hardmask=getattr(obj, '_hardmask', False), + _sharedmask=getattr(obj, '_sharedmask', False), + _isfield=getattr(obj, '_isfield', False), + _baseclass=getattr(obj, '_baseclass', _baseclass), + _optinfo=_optinfo, + _basedict=_optinfo) + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + return + + def __array_finalize__(self, obj): + """ + Finalizes the masked array. + + """ + # Get main attributes. + self._update_from(obj) + + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view. + # The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. For instance, + # most users think that np.empty_like(marr) -- which goes via this + # method -- should return a masked array with an empty mask (see + # gh-3404 and linked discussions), but others disagree, and they have + # existing code which depends on empty_like returning an array that + # matches the input mask. + # + # Historically our algorithm was: if the template object mask had the + # same *number of elements* as us, then we used *it's mask object + # itself* as our mask, so that writes to us would also write to the + # original array. This is horribly broken in multiple ways. + # + # Now what we do instead is, if the template object mask has the same + # number of elements as us, and we do not have the same base pointer + # as the template object (b/c views like arr[...] 
should keep the same + # mask), then we make a copy of the template object mask and use + # that. This is also horribly broken but somewhat less so. Maybe. + if isinstance(obj, ndarray): + # XX: This looks like a bug -- shouldn't it check self.dtype + # instead? + if obj.dtype.names is not None: + _mask = getmaskarray(obj) + else: + _mask = getmask(obj) + + # If self and obj point to exactly the same data, then probably + # self is a simple view of obj (e.g., self = obj[...]), so they + # should share the same mask. (This isn't 100% reliable, e.g. self + # could be the first row of obj, or have strange strides, but as a + # heuristic it's not bad.) In all other cases, we make a copy of + # the mask, so that future modifications to 'self' do not end up + # side-effecting 'obj' as well. + if (_mask is not nomask and obj.__array_interface__["data"][0] + != self.__array_interface__["data"][0]): + # We should make a copy. But we could get here via astype, + # in which case the mask might need a new dtype as well + # (e.g., changing to or from a structured dtype), and the + # order could have changed. So, change the mask type if + # needed and use astype instead of copy. + if self.dtype == obj.dtype: + _mask_dtype = _mask.dtype + else: + _mask_dtype = make_mask_descr(self.dtype) + + if self.flags.c_contiguous: + order = "C" + elif self.flags.f_contiguous: + order = "F" + else: + order = "K" + + _mask = _mask.astype(_mask_dtype, order) + else: + # Take a view so shape changes, etc., do not propagate back. + _mask = _mask.view() + else: + _mask = nomask + + self._mask = _mask + # Finalize the mask + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + + # Finalize the fill_value + if self._fill_value is not None: + self._fill_value = _check_fill_value(self._fill_value, self.dtype) + elif self.dtype.names is not None: + # Finalize the default fill_value for structured arrays + self._fill_value = _check_fill_value(None, self.dtype) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + """ + Special hook for ufuncs. + + Wraps the numpy array and sets the mask according to context. 
+ + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func, None) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + d = filled(domain(*input_args), True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, then this argument is inferred from the passed `dtype`, or + in its absence the original array, as discussed in the notes below. + + See Also + -------- + numpy.ndarray.view : Equivalent method on ndarray object. + + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. 
Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. + """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + + # also make the mask be a view (so attr changes to the view's + # mask do no affect original object's mask) + # (especially important to avoid affecting np.masked singleton) + if getmask(output) is not nomask: + output._mask = output._mask.view() + + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + + def __getitem__(self, indx): + """ + x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. + + """ + # We could directly use ndarray.__getitem__ on self. + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet + # So it's easier to stick to the current version + dout = self.data[indx] + _mask = self._mask + + def _is_scalar(m): + return not isinstance(m, np.ndarray) + + def _scalar_heuristic(arr, elem): + """ + Return whether `elem` is a scalar result of indexing `arr`, or None + if undecidable without promoting nomask to a full mask + """ + # obviously a scalar + if not isinstance(elem, np.ndarray): + return True + + # object array scalar indexing can return anything + elif arr.dtype.type is np.object_: + if arr.dtype is not elem.dtype: + # elem is an array, but dtypes do not match, so must be + # an element + return True + + # well-behaved subclass that only returns 0d arrays when + # expected - this is not a scalar + elif type(arr).__getitem__ == ndarray.__getitem__: + return False + + return None + + if _mask is not nomask: + # _mask cannot be a subclass, so it tells us whether we should + # expect a scalar. It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. + return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked. 
+ if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + else: + if mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # Something like gh-15895 has happened if this check fails. + # _fill_value should always be an ndarray. + if not isinstance(dout._fill_value, np.ndarray): + raise RuntimeError('Internal NumPy error.') + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + f"{indx!s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + f"all to {dout._fill_value[0]!s}.", + stacklevel=2) + # Need to use `.flat[0:1].squeeze(...)` instead of just + # `.flat[0]` to ensure the result is a 0d array and not + # a scalar. + dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + # setitem may put NaNs into integer arrays or occasionally overflow a + # float. But this may happen in masked values, so avoid otherwise + # correct warnings (as is typical also in masked calculations). + @np.errstate(over='ignore', invalid='ignore') + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, str): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. 
+ if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + if (isinstance(indx, masked_array) and + not isinstance(value, masked_array)): + _data[indx.data] = dval + else: + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super().shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if current_mask is nomask: + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. 
+ else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.asarray(mask) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + copy = None if not copy else True + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + @property + def mask(self): + """ Current mask. """ + + # We could try to force a reshape, but that wouldn't work in some + # cases. + # Return a view so that the dtype and shape cannot be changed in place + # This still preserves nomask by identity + return self._mask.view() + + @mask.setter + def mask(self, value): + self.__setmask__(value) + + @property + def recordmask(self): + """ + Get or set the mask of the array if it has no named fields. For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) + """ + + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + @recordmask.setter + def recordmask(self, mask): + raise NotImplementedError("Coming soon: setting the mask per records!") + + def harden_mask(self): + """ + Force the mask to hard, preventing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `harden_mask` sets + `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.soften_mask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft (default), allowing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `soften_mask` sets + `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.harden_mask + + """ + self._hardmask = False + return self + + @property + def hardmask(self): + """ + Specifies whether values can be unmasked through assignments. + + By default, assigning definite values to masked array entries will + unmask them. When `hardmask` is ``True``, the mask will not change + through assignments. 
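+ + A minimal illustrative sketch (an addition; the examples below show the full behaviour): + + >>> np.ma.array([1.0]).harden_mask().hardmask + True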
+ + See Also + -------- + ma.MaskedArray.harden_mask + ma.MaskedArray.soften_mask + + Examples + -------- + >>> x = np.arange(10) + >>> m = np.ma.masked_array(x, x>5) + >>> assert not m.hardmask + + Since `m` has a soft mask, assigning an element value unmasks that + element: + + >>> m[8] = 42 + >>> m + masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + After hardening, the mask is not affected by assignments: + + >>> hardened = np.ma.harden_mask(m) + >>> assert m.hardmask and hardened is m + >>> m[:] = 23 + >>> m + masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + """ + return self._hardmask + + def unshare_mask(self): + """ + Copy the mask and set the `sharedmask` flag to ``False``. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not + shared. A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass + + def _get_data(self): + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ + return MaskedIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + @property + def fill_value(self): + """ + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. + + Examples + -------- + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... 
+        np.int64(999999)
+        np.int64(999999)
+        np.float64(1e+20)
+        np.complex128(1e+20+0j)
+
+        >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
+        >>> x.fill_value
+        np.float64(-inf)
+        >>> x.fill_value = np.pi
+        >>> x.fill_value
+        np.float64(3.1415926535897931)
+
+        Reset to default:
+
+        >>> x.fill_value = None
+        >>> x.fill_value
+        np.float64(1e+20)
+
+        """
+        if self._fill_value is None:
+            self._fill_value = _check_fill_value(None, self.dtype)
+
+        # Temporary workaround to account for the fact that str and bytes
+        # scalars cannot be indexed with (), whereas all other numpy
+        # scalars can. See issues #7259 and #7267.
+        # The if-block can be removed after #7267 has been fixed.
+        if isinstance(self._fill_value, ndarray):
+            return self._fill_value[()]
+        return self._fill_value
+
+    @fill_value.setter
+    def fill_value(self, value=None):
+        target = _check_fill_value(value, self.dtype)
+        if not target.ndim == 0:
+            # 2019-11-12, 1.18.0
+            warnings.warn(
+                "Non-scalar arrays for the fill value are deprecated. Use "
+                "arrays with scalar values instead. The filled function "
+                "still supports any array as `fill_value`.",
+                DeprecationWarning, stacklevel=2)
+
+        _fill_value = self._fill_value
+        if _fill_value is None:
+            # Create the attribute if it was undefined
+            self._fill_value = target
+        else:
+            # Don't overwrite the attribute, just fill it (for propagation)
+            _fill_value[()] = target
+
+    # kept for compatibility
+    get_fill_value = fill_value.fget
+    set_fill_value = fill_value.fset
+
+    def filled(self, fill_value=None):
+        """
+        Return a copy of self, with masked values filled with a given value.
+        **However**, if there are no masked values to fill, self will be
+        returned instead as an ndarray.
+
+        Parameters
+        ----------
+        fill_value : array_like, optional
+            The value to use for invalid entries. Can be scalar or non-scalar.
+            If non-scalar, the resulting ndarray must be broadcastable over
+            input array. Default is None, in which case, the `fill_value`
+            attribute of the array is used instead.
+
+        Returns
+        -------
+        filled_array : ndarray
+            A copy of ``self`` with invalid entries replaced by *fill_value*
+            (be it the function argument or the attribute of ``self``), or
+            ``self`` itself as an ndarray if there are no invalid entries to
+            be replaced.
+
+        Notes
+        -----
+        The result is **not** a MaskedArray!
+
+        Examples
+        --------
+        >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+        >>> x.filled()
+        array([   1,    2, -999,    4, -999])
+        >>> x.filled(fill_value=1000)
+        array([   1,    2, 1000,    4, 1000])
+        >>> type(x.filled())
+        <class 'numpy.ndarray'>
+
+        Subclassing is preserved. This means that if, e.g., the data part of
+        the masked array is a recarray, `filled` returns a recarray:
+
+        >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+        >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+        >>> m.filled()
+        rec.array([(999999,      2), (    -3, 999999)],
+                  dtype=[('f0', '<i8'), ('f1', '<i8')])
+        """
+        m = self._mask
+        if m is nomask:
+            return self._data
+
+        if fill_value is None:
+            fill_value = self.fill_value
+        else:
+            fill_value = _check_fill_value(fill_value, self.dtype)
+
+        if self is masked_singleton:
+            return np.asanyarray(fill_value)
+
+        if m.dtype.names is not None:
+            result = self._data.copy('K')
+            _recursive_filled(result, self._mask, fill_value)
+        elif not m.any():
+            return self._data
+        else:
+            result = self._data.copy('K')
+            try:
+                np.copyto(result, fill_value, where=m)
+            except (TypeError, AttributeError):
+                fill_value = narray(fill_value, dtype=object)
+                d = result.astype(object)
+                result = np.choose(m, (d, fill_value))
+            except IndexError:
+                # ok, if scalar
+                if self._data.shape:
+                    raise
+                elif m:
+                    result = np.array(fill_value, dtype=self.dtype)
+                else:
+                    result = self._data
+        return result
+
+    def compressed(self):
+        """
+        Return all the non-masked data as a 1-D array.
+
+        Returns
+        -------
+        data : ndarray
+            A new `ndarray` holding the non-masked data is returned.
+
+        Notes
+        -----
+        The result is **not** a MaskedArray!
+
+        Examples
+        --------
+        >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
+        >>> x.compressed()
+        array([0, 1])
+        >>> type(x.compressed())
+        <class 'numpy.ndarray'>
+
+        N-D arrays are compressed to 1-D.
+
+        >>> arr = [[1, 2], [3, 4]]
+        >>> mask = [[1, 0], [0, 1]]
+        >>> x = np.ma.array(arr, mask=mask)
+        >>> x.compressed()
+        array([2, 3])
+
+        """
+        data = ndarray.ravel(self._data)
+        if self._mask is not nomask:
+            data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
+        return data
+
+    def compress(self, condition, axis=None, out=None):
+        """
+        Return `a` where condition is ``True``.
+
+        If condition is a `~ma.MaskedArray`, missing values are considered
+        as ``False``.
+ + Parameters + ---------- + condition : var + Boolean 1-d array selecting which entries to return. If len(condition) + is less than the size of a along the axis, then output is truncated + to length of condition array. + axis : {None, int}, optional + Axis along which the operation must be performed. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Notes + ----- + Please note the difference with :meth:`compressed` ! + The output of :meth:`compress` has a mask, the output of + :meth:`compressed` does not. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.compress([1, 0, 1]) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.asarray(condition) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. 
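To contrast the three ways of getting data out defined above, a small doctest-style sketch; the non-scalar fill is an assumption allowed by the broadcastable-fill rule in `filled`, and outputs are as printed by a current NumPy.

>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> x.filled(np.arange(10, 50, 10))   # non-scalar fill broadcasts elementwise
array([ 1, 20,  3, 40])
>>> x.compressed()                    # only the unmasked entries survive, as 1-D
array([1, 3])
>>> print(x)                          # __str__ renders masked slots as --
[1 -- 3 --]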
+ + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + + # 2016-11-19: Demoted to legacy format + if np._core.arrayprint._get_legacy_print_mode() <= 113: + is_long = self.ndim > 1 + parameters = dict( + name=name, + nlen=" " * len(name), + data=str(self), + mask=str(self._mask), + fill=str(self.fill_value), + dtype=str(self.dtype) + ) + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = f"masked_{name}(" + + dtype_needed = ( + not np._core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = {k: ' ' * min_indent for k in keys} + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + + if self._fill_value is None: + self.fill_value # initialize fill_value + + if (self._fill_value.dtype.kind in ("S", "U") + and self.dtype.kind == self._fill_value.dtype.kind): + # Allow strings: "N/A" has length 3 so would mismatch. + fill_repr = repr(self.fill_value.item()) + elif self._fill_value.dtype == self.dtype and not self.dtype == object: + # Guess that it is OK to use the string as item repr. To really + # fix this, it needs new logic (shared with structured scalars) + fill_repr = str(self.fill_value) + else: + fill_repr = repr(self.fill_value) + + reprs['fill_value'] = fill_repr + if dtype_needed: + reprs['dtype'] = np._core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + '{}{}={}'.format(indents[k], k, reprs[k]) + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. 
+ + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + omask = getmask(other) + smask = self.mask + mask = mask_or(smask, omask, copy=True) + + odata = getdata(other) + if mask.dtype.names is not None: + # only == and != are reasonably defined for structured dtypes, + # so give up early for all other comparisons: + if compare not in (operator.eq, operator.ne): + return NotImplemented + # For possibly masked structured arrays we need to be careful, + # since the standard structured array comparison will use all + # fields, masked or not. To avoid masked fields influencing the + # outcome, we set all masked fields in self to other, so they'll + # count as equal. To prepare, we ensure we have the right shape. + broadcast_shape = np.broadcast(self, odata).shape + sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) + sbroadcast._mask = mask + sdata = sbroadcast.filled(odata) + # Now take care of the mask; the merged mask should have an item + # masked if all fields were masked (in one and/or other). + mask = (mask == np.ones((), mask.dtype)) + # Ensure we can compare masks below if other was not masked. + if omask is np.False_: + omask = np.zeros((), smask.dtype) + + else: + # For regular arrays, just use the data as they come. + sdata = self.data + + check = compare(sdata, odata) + + if isinstance(check, (np.bool, bool)): + return masked if mask else check + + if mask is not nomask: + if compare in (operator.eq, operator.ne): + # Adjust elements that were masked, which should be treated + # as equal if masked in both, unequal if masked in one. + # Note that this works automatically for structured arrays too. + # Ignore this for operations other than `==` and `!=` + check = np.where(mask, compare(smask, omask), check) + + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since the + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() + + check = check.view(type(self)) + check._update_from(self) + check._mask = mask + + # Cast fill value to np.bool if needed. If it cannot be cast, the + # default boolean fill value is used. + if check._fill_value is not None: + try: + fill = _check_fill_value(check._fill_value, np.bool) + except (TypeError, ValueError): + fill = _check_fill_value(None, np.bool) + check._fill_value = fill + + return check + + def __eq__(self, other): + """Check whether other equals self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.eq) + + def __ne__(self, other): + """Check whether other does not equal self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. 
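A doctest-style sketch of the comparison contract described above: the result is masked wherever either operand is, but the underlying boolean is still set, with a pair of masked elements counting as equal.

>>> a = np.ma.array([1, 2, 3], mask=[0, 0, 1])
>>> b = np.ma.array([1, 9, 3], mask=[0, 0, 1])
>>> a == b
masked_array(data=[True, False, --],
             mask=[False, False,  True],
       fill_value=True)
>>> (a == b).data                     # masked == masked is recorded as True
array([ True, False,  True])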
+ """ + return self._comparison(other, operator.ne) + + # All other comparisons: + def __le__(self, other): + return self._comparison(other, operator.le) + + def __lt__(self, other): + return self._comparison(other, operator.lt) + + def __ge__(self, other): + return self._comparison(other, operator.ge) + + def __gt__(self, other): + return self._comparison(other, operator.gt) + + def __add__(self, other): + """ + Add self to other, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return add(self, other) + + def __radd__(self, other): + """ + Add other to self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other + self`. + return add(other, self) + + def __sub__(self, other): + """ + Subtract other from self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return subtract(self, other) + + def __rsub__(self, other): + """ + Subtract self from other, and return a new masked array. + + """ + return subtract(other, self) + + def __mul__(self, other): + "Multiply self by other, and return a new masked array." + if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __div__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return divide(self, other) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + else: + if m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__iadd__(other_data) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. 
+ + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__isub__(other_data) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__imul__(other_data) + return self + + def __idiv__(self, other): + """ + Divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 4 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__idiv__(other_data) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__ifloordiv__(other_data) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__itruediv__(other_data) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(other_data) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. 
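The in-place operators above widen the mask rather than compute with masked operands; a minimal sketch with float data and the default fill value.

>>> a = np.ma.array([1., 2., 4.], mask=[0, 1, 0])
>>> a += np.ma.array([10., 10., 10.], mask=[0, 0, 1])
>>> a                                 # masked slots never receive the other operand
masked_array(data=[11.0, --, --],
             mask=[False,  True,  True],
       fill_value=1e+20)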
+ + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + @property + def imag(self): + """ + The imaginary part of the masked array. + + This property is a view on the imaginary part of this `MaskedArray`. + + See Also + -------- + real + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.imag + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_imag = imag.fget + + @property + def real(self): + """ + The real part of the masked array. + + This property is a view on the real part of this `MaskedArray`. + + See Also + -------- + imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.real + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_real = real.fget + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. + The default, None, performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.10.0 + + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + ma.count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. 
+ + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape == (): + if axis not in (None, 0): + raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims', False): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims', False): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + (Masked arrays currently use 'A' on the data when 'K' is passed.) + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) + + """ + # The order of _data and _mask could be different (it shouldn't be + # normally). Passing order `K` or `A` would be incorrect. + # So we ignore the mask memory order. + # TODO: We don't actually support K, so use A instead. We could + # try to guess this correct by sorting strides or deprecate. + if order in "kKaA": + order = "F" if self._data.flags.fnc else "C" + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. 
+ + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) + >>> x = x.reshape((4,1)) + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. 
+ + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.put([0,4,8],[10,20,30]) + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + >>> x.put(4,999) + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=None) + values = narray(values, copy=None, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) # may vary + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284) # may vary + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. 
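How the filling plays out in practice: `all` fills masked entries with True and `any` with False, so a masked element can never decide the outcome either way. A small doctest-style sketch (bool() sidesteps scalar repr differences):

>>> a = np.ma.array([0, 9, 0], mask=[0, 1, 0])
>>> bool(a.all())                     # the masked 9 is filled with True here
False
>>> bool(a.any())                     # ...and with False here, so it is ignored
False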
+ + See Also + -------- + numpy.ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + numpy.ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. + + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. 
+ + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return np.asarray(self.filled(0)).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + #!!!: implement out + test! + m = self._mask + if m is nomask: + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. 
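`dot` above carries no doctest; a small sketch of the `strict` switch, with an identity matrix chosen so the data pass through unchanged (outputs as expected from the documented mask-propagation rule).

>>> a = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
>>> eye = np.ma.array([[1, 0], [0, 1]])
>>> a.dot(eye)                        # strict=False: masked entries count as 0
masked_array(
  data=[[1, 0],
        [3, 4]],
  mask=[[False, False],
        [False, False]],
  fill_value=999999)
>>> a.dot(eye, strict=True)           # strict=True masks the whole first row
masked_array(
  data=[[--, --],
        [3, 4]],
  mask=[[ True,  True],
        [False, False]],
  fill_value=999999)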
+
+        See Also
+        --------
+        numpy.ndarray.sum : corresponding function for ndarrays
+        numpy.sum : equivalent function
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.sum()
+        25
+        >>> x.sum(axis=1)
+        masked_array(data=[4, 5, 16],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> x.sum(axis=0)
+        masked_array(data=[8, 5, 12],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+        <class 'numpy.int64'>
+
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        _mask = self._mask
+        newmask = _check_mask_axis(_mask, axis, **kwargs)
+        # No explicit output
+        if out is None:
+            result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+            rndim = getattr(result, 'ndim', 0)
+            if rndim:
+                result = result.view(type(self))
+                result.__setmask__(newmask)
+            elif newmask:
+                result = masked
+            return result
+        # Explicit output
+        result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+        if isinstance(out, MaskedArray):
+            outmask = getmask(out)
+            if outmask is nomask:
+                outmask = out._mask = make_mask_none(out.shape)
+            outmask.flat = newmask
+        return out
+
+    def cumsum(self, axis=None, dtype=None, out=None):
+        """
+        Return the cumulative sum of the array elements over the given axis.
+
+        Masked values are set to 0 internally during the computation.
+        However, their position is saved, and the result will be masked at
+        the same locations.
+
+        Refer to `numpy.cumsum` for full documentation.
+
+        Notes
+        -----
+        The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !
+
+        Arithmetic is modular when using integer types, and no error is
+        raised on overflow.
+
+        See Also
+        --------
+        numpy.ndarray.cumsum : corresponding function for ndarrays
+        numpy.cumsum : equivalent function
+
+        Examples
+        --------
+        >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+        >>> marr.cumsum()
+        masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+                     mask=[False, False, False,  True,  True,  True, False, False,
+                           False, False],
+               fill_value=999999)
+
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+        if out is not None:
+            if isinstance(out, MaskedArray):
+                out.__setmask__(self.mask)
+            return out
+        result = result.view(type(self))
+        result.__setmask__(self._mask)
+        return result
+
+    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+        """
+        Return the product of the array elements over the given axis.
+
+        Masked elements are set to 1 internally for computation.
+
+        Refer to `numpy.prod` for full documentation.
+
+        Notes
+        -----
+        Arithmetic is modular when using integer types, and no error is raised
+        on overflow.
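`prod` keeps no doctest of its own in this stretch; a one-line sketch of the fill-with-1 rule (int() sidesteps scalar repr differences):

>>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
>>> int(x.prod())                     # the masked 3 contributes a factor of 1
2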
+ + See Also + -------- + numpy.ndarray.prod : corresponding function for ndarrays + numpy.prod : equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(1).prod(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + product = prod + + def cumprod(self, axis=None, dtype=None, out=None): + """ + Return the cumulative product of the array elements over the given axis. + + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average : Weighted average. + + Examples + -------- + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if self._mask is nomask: + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] + else: + is_float16_result = False + if dtype is None: + if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)): + dtype = mu.dtype('f8') + elif issubclass(self.dtype.type, ntypes.float16): + dtype = mu.dtype('f4') + is_float16_result = True + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + elif is_float16_result: + result = self.dtype.type(dsum * 1. / cnt) + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. 
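Same idea for the cumulative version defined above: masked positions contribute a neutral factor but stay masked in the result. A small doctest-style sketch:

>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
>>> x.cumprod()
masked_array(data=[1, --, 3, 12],
             mask=[False,  True, False, False],
       fill_value=999999)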
+ + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. + + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data=[-1., 0., 1.], + mask=False, + fill_value=1e+20) + + """ + m = self.mean(axis, dtype) + if not axis: + return self - m + else: + return self - expand_dims(m, axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue, mean=np._NoValue): + """ + Returns the variance of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.ndarray.var : corresponding function for ndarrays + numpy.var : Equivalent function + """ + kwargs = {} + + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + + # Easy case: nomask, business as usual + if self._mask is nomask: + + if mean is not np._NoValue: + kwargs['mean'] = mean + + ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs)[()] + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(nomask) + return out + return ret + + # Some data are masked, yay! + cnt = self.count(axis=axis, **kwargs) - ddof + + if mean is not np._NoValue: + danom = self - mean + else: + danom = self - self.mean(axis, dtype, keepdims=True) + + if iscomplexobj(self): + danom = umath.absolute(danom) ** 2 + else: + danom *= danom + dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) + # Apply the mask if it's not a scalar + if dvar.ndim: + dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) + dvar._update_from(self) + elif getmask(dvar): + # Make sure that masked is returned when the scalar is masked. + dvar = masked + if out is not None: + if isinstance(out, MaskedArray): + out.flat = 0 + out.__setmask__(True) + elif out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or "\ + "more location." + raise MaskError(errmsg) + else: + out.flat = np.nan + return out + # In case with have an explicit output + if out is not None: + # Set the data + out.flat = dvar + # Set the mask if needed + if isinstance(out, MaskedArray): + out.__setmask__(dvar.mask) + return out + return dvar + var.__doc__ = np.var.__doc__ + + def std(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue, mean=np._NoValue): + """ + Returns the standard deviation of the array elements along given axis. + + Masked entries are ignored. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.ndarray.std : corresponding function for ndarrays + numpy.std : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + dvar = self.var(axis, dtype, out, ddof, **kwargs) + if dvar is not masked: + if out is not None: + np.power(out, 0.5, out=out, casting='unsafe') + return out + dvar = sqrt(dvar) + return dvar + + def round(self, decimals=0, out=None): + """ + Return each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. 
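`var` and `std` above carry no examples; a minimal sketch with one masked point, so the statistics run over the three unmasked values only (float() conversions keep the output independent of NumPy's scalar repr):

>>> a = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 0, 1])
>>> float(a.mean()), float(a.var()), float(a.std())
(2.0, 0.6666666666666666, 0.816496580927726)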
+ + See Also + -------- + numpy.ndarray.round : corresponding function for ndarrays + numpy.around : equivalent function + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], + ... mask=[0, 0, 0, 1, 0, 0]) + >>> ma.round(x) + masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0], + mask=[False, False, False, True, False, False], + fill_value=1e+20) + + """ + result = self._data.round(decimals=decimals, out=out).view(type(self)) + if result.ndim > 0: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked + # No explicit output: we're done + if out is None: + return result + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + + def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + `fill_value`. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + + .. versionchanged:: 1.13.0 + Previously, the default was documented to be -1, but that was + in error. At some future date, the default will change to -1, as + originally intended. + Until then, the axis should be given explicitly when + ``arr.ndim > 1``, to avoid a FutureWarning. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + stable : bool, optional + Only for compatibility with ``np.argsort``. Ignored. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + ma.MaskedArray.sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + numpy.ndarray.sort : Inplace sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> a = np.ma.array([3,2,1], mask=[False, False, True]) + >>> a + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.argsort() + array([1, 0, 2]) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." 
+ ) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(self) + + if fill_value is None: + if endwith: + # nan > inf + if np.issubdtype(self.dtype, np.floating): + fill_value = np.nan + else: + fill_value = minimum_fill_value(self) + else: + fill_value = maximum_fill_value(self) + + filled = self.filled(fill_value) + return filled.argsort(axis=axis, kind=kind, order=order) + + def argmin(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Return array of indices to the minimum values along the given axis. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + minimum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + ndarray or scalar + If multi-dimension input, returns a new ndarray of indices to the + minimum values along the given axis. Otherwise, returns a scalar + of index to the minimum values along the given axis. + + Examples + -------- + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) + >>> x.shape = (2,2) + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) + + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmin(axis, out=out, keepdims=keepdims) + + def argmax(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Returns array of indices of the maximum values along the given axis. + Masked values are treated as if they had the value fill_value. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + maximum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + index_array : {integer_array} + + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + >>> a.argmax() + 5 + >>> a.argmax(0) + array([1, 1, 1]) + >>> a.argmax(1) + array([2, 2]) + + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmax(axis, out=out, keepdims=keepdims) + + def sort(self, axis=-1, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Sort the array, in-place + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. 
+        order : list, optional
+            When `a` is a structured array, this argument specifies which fields
+            to compare first, second, and so on.  This list does not need to
+            include all of the fields.
+        endwith : {True, False}, optional
+            Whether missing values (if any) should be treated as the largest
+            values (True) or the smallest values (False).
+            When the array contains unmasked values at the same extremes of the
+            datatype, the ordering of these values and the masked values is
+            undefined.
+        fill_value : scalar or None, optional
+            Value used internally for the masked values.
+            If ``fill_value`` is not None, it supersedes ``endwith``.
+        stable : bool, optional
+            Only for compatibility with ``np.sort``. Ignored.
+
+        Returns
+        -------
+        None
+            The array is sorted in place; nothing is returned.
+
+        See Also
+        --------
+        numpy.ndarray.sort : Method to sort an array in-place.
+        argsort : Indirect sort.
+        lexsort : Indirect stable sort on multiple keys.
+        searchsorted : Find elements in a sorted array.
+
+        Notes
+        -----
+        See `numpy.sort` for notes on the different sorting algorithms.
+
+        Examples
+        --------
+        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
+        >>> # Default
+        >>> a.sort()
+        >>> a
+        masked_array(data=[1, 3, 5, --, --],
+                     mask=[False, False, False,  True,  True],
+               fill_value=999999)
+
+        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
+        >>> # Put missing values in the front
+        >>> a.sort(endwith=False)
+        >>> a
+        masked_array(data=[--, --, 1, 3, 5],
+                     mask=[ True,  True, False, False, False],
+               fill_value=999999)
+
+        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
+        >>> # fill_value takes over endwith
+        >>> a.sort(endwith=False, fill_value=3)
+        >>> a
+        masked_array(data=[1, --, --, 3, 5],
+                     mask=[False,  True,  True, False, False],
+               fill_value=999999)
+
+        """
+        if stable:
+            raise ValueError(
+                "`stable` parameter is not supported for masked arrays."
+            )
+
+        if self._mask is nomask:
+            ndarray.sort(self, axis=axis, kind=kind, order=order)
+            return
+
+        if self is masked:
+            return
+
+        sidx = self.argsort(axis=axis, kind=kind, order=order,
+                            fill_value=fill_value, endwith=endwith)
+
+        self[...] = np.take_along_axis(self, sidx, axis=axis)
+
+    def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+        """
+        Return the minimum along a given axis.
+
+        Parameters
+        ----------
+        axis : None or int or tuple of ints, optional
+            Axis along which to operate.  By default, ``axis`` is None and the
+            flattened input is used.
+            .. versionadded:: 1.7.0
+               If this is a tuple of ints, the minimum is selected over multiple
+               axes, instead of a single axis or all the axes as before.
+        out : array_like, optional
+            Alternative output array in which to place the result.  Must be of
+            the same shape and buffer length as the expected output.
+        fill_value : scalar or None, optional
+            Value used to fill in the masked values.
+            If None, use the output of `minimum_fill_value`.
+        keepdims : bool, optional
+            If this is set to True, the axes which are reduced are left
+            in the result as dimensions with size one. With this option,
+            the result will broadcast correctly against the array.
+
+        Returns
+        -------
+        amin : array_like
+            New array holding the result.
+            If ``out`` was specified, ``out`` is returned.
+
+        See Also
+        --------
+        ma.minimum_fill_value
+            Returns the minimum filling value for a given datatype.
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]] + >>> mask = [[1, 1, 0], [0, 0, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[--, --, 3.0], + [0.2, -0.7, --]], + mask=[[ True, True, False], + [False, False, True]], + fill_value=1e+20) + >>> ma.min(masked_x) + -0.7 + >>> ma.min(masked_x, axis=-1) + masked_array(data=[3.0, -0.7], + mask=[False, False], + fill_value=1e+20) + >>> ma.min(masked_x, axis=0, keepdims=True) + masked_array(data=[[0.2, -0.7, 3.0]], + mask=[[False, False, False]], + fill_value=1e+20) + >>> mask = [[1, 1, 1,], [1, 1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.min(masked_x, axis=0) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + .. versionadded:: 1.7.0 + If this is a tuple of ints, the maximum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.maximum_fill_value + Returns the maximum filling value for a given datatype. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[-1., 2.5], [4., -2.], [3., 0.]] + >>> mask = [[0, 0], [1, 0], [1, 0]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[-1.0, 2.5], + [--, -2.0], + [--, 0.0]], + mask=[[False, False], + [ True, False], + [ True, False]], + fill_value=1e+20) + >>> ma.max(masked_x) + 2.5 + >>> ma.max(masked_x, axis=0) + masked_array(data=[-1.0, 2.5], + mask=[False, False], + fill_value=1e+20) + >>> ma.max(masked_x, axis=1, keepdims=True) + masked_array( + data=[[2.5], + [-2.0], + [0.0]], + mask=[[False], + [False], + [False]], + fill_value=1e+20) + >>> mask = [[1, 1], [1, 1], [1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.max(masked_x, axis=1) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : scalar or None, optional + Value used to fill in the masked values. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + Examples + -------- + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... 
[6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=np.int64(999999), + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=np.int64(999999), + dtype=uint8) + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] so that + # .view works correctly + if out is None: + out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + # demote 0d arrays back to scalars, for consistency with ndarray.take + return out[()] + + # Array methods + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') + squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') + + @property + def mT(self): + """ + Return the matrix-transpose of the masked array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Returns + ------- + result: MaskedArray + The masked array with the last two dimensions transposed + + Raises + ------ + ValueError + If the array is of dimension less than 2. 
+ + See Also + -------- + ndarray.mT: + Equivalent method for arrays + """ + + if self.ndim < 2: + raise ValueError("matrix transpose with ndim < 2 is undefined") + + if self._mask is nomask: + return masked_array(data=self._data.mT) + else: + return masked_array(data=self.data.mT, mask=self.mask.mT) + + + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. + + Examples + -------- + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays. + if _mask is nomask: + return [None] + # Set temps to save time when dealing w/ marrays. + inishape = self.shape + result = np.array(self._data.ravel(), dtype=object) + result[_mask.ravel()] = None + result.shape = inishape + return result.tolist() + + def tostring(self, fill_value=None, order='C'): + r""" + A compatibility alias for `tobytes`, with exactly the same behavior. + + Despite its name, it returns `bytes` not `str`\ s. + + .. deprecated:: 1.19.0 + """ + # 2020-03-30, Numpy 1.19.0 + warnings.warn( + "tostring() is deprecated. Use tobytes() instead.", + DeprecationWarning, stacklevel=2) + + return self.tobytes(fill_value, order=order) + + def tobytes(self, fill_value=None, order='C'): + """ + Return the array data as a string containing the raw bytes in the array. + + The array is filled with a fill value before the string conversion. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fill_value : scalar, optional + Value used to fill in the masked values. Default is None, in which + case `MaskedArray.fill_value` is used. + order : {'C','F','A'}, optional + Order of the data item in the copy. Default is 'C'. + + - 'C' -- C order (row major). + - 'F' -- Fortran order (column major). + - 'A' -- Any, current order of array. + - None -- Same as 'A'. + + See Also + -------- + numpy.ndarray.tobytes + tolist, tofile + + Notes + ----- + As for `ndarray.tobytes`, information about the shape, dtype, etc., + but also about `fill_value`, will be lost. + + Examples + -------- + >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.tobytes() + b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + + """ + return self.filled(fill_value).tobytes(order=order) + + def tofile(self, fid, sep="", format="%s"): + """ + Save a masked array to a file in binary format. + + .. warning:: + This function is not implemented yet. 
+
+        Raises
+        ------
+        NotImplementedError
+            When `tofile` is called.
+
+        """
+        raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
+
+    def toflex(self):
+        """
+        Transforms a masked array into a flexible-type array.
+
+        The flexible type array that is returned will have two fields:
+
+        * the ``_data`` field stores the ``_data`` part of the array.
+        * the ``_mask`` field stores the ``_mask`` part of the array.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        record : ndarray
+            A new flexible-type `ndarray` with two fields: the first element
+            containing a value, the second element containing the corresponding
+            mask boolean. The returned record shape matches self.shape.
+
+        Notes
+        -----
+        A side-effect of transforming a masked array into a flexible `ndarray` is
+        that meta information (``fill_value``, ...) will be lost.
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.toflex()
+        array([[(1, False), (2,  True), (3, False)],
+               [(4,  True), (5, False), (6,  True)],
+               [(7, False), (8,  True), (9, False)]],
+              dtype=[('_data', '<i8'), ('_mask', '?')])
+
+        """
+        # Get the basic dtype.
+        ddtype = self.dtype
+        # Make sure we have a mask
+        _mask = self._mask
+        if _mask is None:
+            _mask = make_mask_none(self.shape, ddtype)
+        # And get its dtype
+        mdtype = self._mask.dtype
+
+        record = np.ndarray(shape=self.shape,
+                            dtype=[('_data', ddtype), ('_mask', mdtype)])
+        record['_data'] = self._data
+        record['_mask'] = self._mask
+        return record
+    torecords = toflex
+
+
+class mvoid(MaskedArray):
+    """
+    Fake a 'void' object to use for masked array with structured dtypes.
+    """
+
+    def __getitem__(self, indx):
+        """
+        Get the index.
+
+        """
+        m = self._mask
+        if isinstance(m[indx], ndarray):
+            # Can happen when indx is a multi-dimensional field:
+            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+            #                     False],)], dtype=[("A", ">i2", (2,))])
+            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+            # and we can not say masked/unmasked.
+            # The result is no longer mvoid!
+            # See also issue #6724.
+            return masked_array(
+                data=self._data[indx], mask=m[indx],
+                fill_value=self._fill_value[indx],
+                hard_mask=self._hardmask)
+        if m is not nomask and m[indx]:
+            return masked
+        return self._data[indx]
+
+    def __setitem__(self, indx, value):
+        self._data[indx] = value
+        if self._hardmask:
+            self._mask[indx] |= getattr(value, "_mask", False)
+        else:
+            self._mask[indx] = getattr(value, "_mask", False)
+
+    def __str__(self):
+        m = self._mask
+        if m is nomask:
+            return str(self._data)
+
+        rdtype = _replace_dtype_fields(self._data.dtype, "O")
+        data_arr = super()._data
+        res = data_arr.astype(rdtype)
+        _recursive_printoption(res, self._mask, masked_print_option)
+        return str(res)
+
+    __repr__ = __str__
+
+    def __iter__(self):
+        "Defines an iterator for mvoid"
+        (_data, _mask) = (self._data, self._mask)
+        if _mask is nomask:
+            yield from _data
+        else:
+            for (d, m) in zip(_data, _mask):
+                if m:
+                    yield masked
+                else:
+                    yield d
+
+    def __len__(self):
+        return self._data.__len__()
+
+    def filled(self, fill_value=None):
+        """
+        Return a copy with masked fields filled with a given value.
+
+        Parameters
+        ----------
+        fill_value : array_like, optional
+            The value to use for invalid entries. Can be scalar or
+            non-scalar. If the latter, the filled array should be
+            broadcastable over the input array. Default is None, in
+            which case the `fill_value` attribute is used instead.
+
+        Returns
+        -------
+        filled_void
+            A `np.void` object.
+
+        See Also
+        --------
+        MaskedArray.filled
+
+        """
+        return asarray(self).filled(fill_value)[()]
+
+    def tolist(self):
+        """
+        Transforms the mvoid object into a tuple.
+
+        Masked fields are replaced by None.
+ + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + +############################################################################## +# Shortcuts # +############################################################################## + + +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super().__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __format__(self, format_spec): + # Replace ndarray.__format__ with the default, which supports no format characters. + # Supporting format characters is unwise here, because we do not know what type + # the user was expecting - better to not guess. 
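+        # For example, f"{np.ma.masked}" uses the empty format spec and
+        # returns '--' via str(); an explicit spec such as
+        # f"{np.ma.masked:.2f}" raises TypeError below, which is caught,
+        # warned about, and currently falls back to '--' as well.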
+ try: + return object.__format__(self, format_spec) + except TypeError: + # 2020-03-23, NumPy 1.19.0 + warnings.warn( + "Format strings passed to MaskedConstant are ignored, but in future may " + "error or produce different behavior", + FutureWarning, stacklevel=2 + ) + return object.__format__(self, "") + + def __reduce__(self): + """Override of MaskedArray's __reduce__. + """ + return (self.__class__, ()) + + # inplace operations have no effect. We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool` scalars. + return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super().__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + f"attributes of {self!r} are not writeable") + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super().__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. 
+ + """ + def __init__(self, ufunc, compare, fill_value): + super().__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b): + "Executes the call behavior." + + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." + target = narray(target, copy=None, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " + f"not the current None, to match np.{self.__name__}.reduce. " + "Explicitly pass 0 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = dict(axis=axis) + else: + kwargs = dict() + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +class _frommethod: + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. 
+ + """ + + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." + meth = getattr(MaskedArray, self.__name__, None) or\ + getattr(np, self.__name__, None) + signature = self.__name__ + get_object_signature(meth) + if meth is not None: + doc = """ %s\n%s""" % ( + signature, getattr(meth, '__doc__', None)) + return doc + + def __call__(self, a, *args, **params): + if self.reversed: + args = list(args) + a, args[0] = args[0], a + + marr = asanyarray(a) + method_name = self.__name__ + method = getattr(type(marr), method_name, None) + if method is None: + # use the corresponding np function + method = getattr(np, method_name) + + return method(marr, *args, **params) + + +all = _frommethod('all') +anomalies = anom = _frommethod('anom') +any = _frommethod('any') +compress = _frommethod('compress', reversed=True) +cumprod = _frommethod('cumprod') +cumsum = _frommethod('cumsum') +copy = _frommethod('copy') +diagonal = _frommethod('diagonal') +harden_mask = _frommethod('harden_mask') +ids = _frommethod('ids') +maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) +mean = _frommethod('mean') +minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) +nonzero = _frommethod('nonzero') +prod = _frommethod('prod') +product = _frommethod('prod') +ravel = _frommethod('ravel') +repeat = _frommethod('repeat') +shrink_mask = _frommethod('shrink_mask') +soften_mask = _frommethod('soften_mask') +std = _frommethod('std') +sum = _frommethod('sum') +swapaxes = _frommethod('swapaxes') +#take = _frommethod('take') +trace = _frommethod('trace') +var = _frommethod('var') + +count = _frommethod('count') + +def take(a, indices, axis=None, out=None, mode='raise'): + """ + """ + a = masked_array(a) + return a.take(indices, axis=axis, out=out, mode=mode) + + +def power(a, b, third=None): + """ + Returns element-wise base array raised to power from second array. + + This is the masked array version of `numpy.power`. For details see + `numpy.power`. + + See Also + -------- + numpy.power + + Notes + ----- + The *out* argument to `numpy.power` is not supported, `third` has to be + None. 
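+
+    Where the operation is invalid for an element (for example, a negative
+    base raised to a fractional power), the result is masked rather than
+    returned as ``nan``; a small illustration:
+
+    >>> np.ma.power(np.ma.array([-4.0, 4.0]), 0.5)
+    masked_array(data=[--, 2.0],
+                 mask=[ True, False],
+           fill_value=1e+20)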
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, 2) + masked_array(data=[125.43999999999998, 15.784728999999999, + 0.6416010000000001, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> y = [-0.5, 2, 0, 17] + >>> masked_y = ma.masked_array(y, mask) + >>> masked_y + masked_array(data=[-0.5, 2.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, masked_y) + masked_array(data=[0.2988071523335984, 15.784728999999999, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + + """ + if third is not None: + raise MaskError("3-argument power not supported.") + # Get the masks + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + # Get the rawdata + fa = getdata(a) + fb = getdata(b) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a, MaskedArray): + basetype = type(a) + else: + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + with np.errstate(divide='ignore', invalid='ignore'): + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = np.logical_not(np.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + if not result.ndim: + return masked + result._mask = np.logical_or(m, invalid) + # Fix the invalid parts + if invalid.any(): + if not result.ndim: + return masked + elif result._mask is nomask: + result._mask = invalid + result._data[invalid] = result.fill_value + return result + +argmin = _frommethod('argmin') +argmax = _frommethod('argmax') + +def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=None): + "Function version of the eponymous method." + a = np.asanyarray(a) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(a) + + if isinstance(a, MaskedArray): + return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=None) + else: + return a.argsort(axis=axis, kind=kind, order=order, stable=None) +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, + stable=None): + """ + Return a sorted copy of the masked array. + + Equivalent to creating a copy of the array + and applying the MaskedArray ``sort()`` method. 
+ + Refer to ``MaskedArray.sort`` for the full documentation + + See Also + -------- + MaskedArray.sort : equivalent method + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.sort(masked_x) + masked_array(data=[-3.973, 0.801, 11.2, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=stable) + else: + a.sort(axis=axis, kind=kind, order=order, stable=stable) + return a + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. + + See Also + -------- + ma.MaskedArray.compressed : Equivalent method. + + Examples + -------- + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[1, --, 0], + [2, --, 3], + [7, 4, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=999999) + + Compress the masked array into a 1-D array of non-masked values: + + >>> np.ma.compressed(masked_x) + array([1, 0, 2, 3, 7, 4]) + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) + >>> ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. + + See Also + -------- + numpy.diag : Equivalent function for ndarrays. 
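+
+    Notes
+    -----
+    Only the mask is carried over alongside the data: when `v` is masked,
+    the result's mask is ``np.diag(v.mask, k)``, so masked entries remain
+    masked in the extracted (or constructed) diagonal. The input's
+    `fill_value` is not propagated to the output.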
+ + Examples + -------- + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[11.2, --, 18.0], + [0.801, --, 12.0], + [7.0, 33.0, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=1e+20) + + Isolate the main diagonal from the masked array: + + >>> np.ma.diag(masked_x) + masked_array(data=[11.2, --, --], + mask=[False, True, True], + fill_value=1e+20) + + Isolate the first diagonal below the main diagonal: + + >>> np.ma.diag(masked_x, -1) + masked_array(data=[0.801, 33.0], + mask=[False, False], + fill_value=1e+20) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. + + See Also + -------- + numpy.left_shift + + """ + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def right_shift(a, n): + """ + Shift the bits of an integer to the right. + + This is the masked array version of `numpy.right_shift`, for details + see that function. + + See Also + -------- + numpy.right_shift + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11, 3, 8, 1] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11, 3, 8, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.right_shift(masked_x,1) + masked_array(data=[5, 1, 4, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def put(a, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + This function is equivalent to `MaskedArray.put`, see that method + for details. + + See Also + -------- + MaskedArray.put + + """ + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return np.asarray(a).put(indices, values, mode=mode) + + +def putmask(a, mask, values): # , mode='raise'): + """ + Changes elements of an array based on conditional and input values. + + This is the masked array version of `numpy.putmask`, for details see + `numpy.putmask`. + + See Also + -------- + numpy.putmask + + Notes + ----- + Using a masked array as `values` will **not** transform a `ndarray` into + a `MaskedArray`. 
+
+    Examples
+    --------
+    >>> arr = [[1, 2], [3, 4]]
+    >>> mask = [[1, 0], [0, 0]]
+    >>> x = np.ma.array(arr, mask=mask)
+    >>> np.ma.putmask(x, x < 4, 10*x)
+    >>> x
+    masked_array(
+      data=[[--, 20],
+            [30, 4]],
+      mask=[[ True, False],
+            [False, False]],
+      fill_value=999999)
+    >>> x.data
+    array([[10, 20],
+           [30,  4]])
+
+    """
+    # We can't use 'frommethod', the order of arguments is different
+    if not isinstance(a, MaskedArray):
+        a = a.view(MaskedArray)
+    (valdata, valmask) = (getdata(values), getmask(values))
+    if getmask(a) is nomask:
+        if valmask is not nomask:
+            a._sharedmask = True
+            a._mask = make_mask_none(a.shape, a.dtype)
+            np.copyto(a._mask, valmask, where=mask)
+    elif a._hardmask:
+        if valmask is not nomask:
+            m = a._mask.copy()
+            np.copyto(m, valmask, where=mask)
+            a.mask |= m
+    else:
+        if valmask is nomask:
+            valmask = getmaskarray(values)
+        np.copyto(a._mask, valmask, where=mask)
+    np.copyto(a._data, valdata, where=mask)
+    return
+
+
+def transpose(a, axes=None):
+    """
+    Permute the dimensions of an array.
+
+    This function is exactly equivalent to `numpy.transpose`.
+
+    See Also
+    --------
+    numpy.transpose : Equivalent function in top-level NumPy module.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> x = ma.arange(4).reshape((2,2))
+    >>> x[1, 1] = ma.masked
+    >>> x
+    masked_array(
+      data=[[0, 1],
+            [2, --]],
+      mask=[[False, False],
+            [False,  True]],
+      fill_value=999999)
+
+    >>> ma.transpose(x)
+    masked_array(
+      data=[[0, 2],
+            [1, --]],
+      mask=[[False, False],
+            [False,  True]],
+      fill_value=999999)
+    """
+    # We can't use 'frommethod', as 'transpose' doesn't take keywords
+    try:
+        return a.transpose(axes)
+    except AttributeError:
+        return np.asarray(a).transpose(axes).view(MaskedArray)
+
+
+def reshape(a, new_shape, order='C'):
+    """
+    Returns an array containing the same data with a new shape.
+
+    Refer to `MaskedArray.reshape` for full documentation.
+
+    See Also
+    --------
+    MaskedArray.reshape : equivalent function
+
+    """
+    # We can't use 'frommethod': it whines about some parameters.
+    try:
+        return a.reshape(new_shape, order=order)
+    except AttributeError:
+        _tmp = np.asarray(a).reshape(new_shape, order=order)
+        return _tmp.view(MaskedArray)
+
+
+def resize(x, new_shape):
+    """
+    Return a new masked array with the specified size and shape.
+
+    This is the masked equivalent of the `numpy.resize` function. The new
+    array is filled with repeated copies of `x` (in the order that the
+    data are stored in memory). If `x` is masked, the new array will be
+    masked, and the new mask will be a repetition of the old one.
+
+    See Also
+    --------
+    numpy.resize : Equivalent function in the top level NumPy module.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.array([[1, 2], [3, 4]])
+    >>> a[0, 1] = ma.masked
+    >>> a
+    masked_array(
+      data=[[1, --],
+            [3, 4]],
+      mask=[[False,  True],
+            [False, False]],
+      fill_value=999999)
+    >>> np.resize(a, (3, 3))
+    masked_array(
+      data=[[1, 2, 3],
+            [4, 1, 2],
+            [3, 4, 1]],
+      mask=False,
+      fill_value=999999)
+    >>> ma.resize(a, (3, 3))
+    masked_array(
+      data=[[1, --, 3],
+            [4, 1, --],
+            [3, 4, 1]],
+      mask=[[False,  True, False],
+            [False, False,  True],
+            [False, False, False]],
+      fill_value=999999)
+
+    A MaskedArray is always returned, regardless of the input type.
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> ma.resize(a, (3, 3))
+    masked_array(
+      data=[[1, 2, 3],
+            [4, 1, 2],
+            [3, 4, 1]],
+      mask=False,
+      fill_value=999999)
+
+    """
+    # We can't use _frommethods here, as np.resize is notoriously whiny.
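+    # Sketch of the approach below: the mask and the data are tiled
+    # separately with np.resize and then recombined, so both repeat with
+    # the same period and every masked cell stays aligned with its
+    # repeated data value (e.g. the (2, 2) input tiled into (3, 3) above).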
+    m = getmask(x)
+    if m is not nomask:
+        m = np.resize(m, new_shape)
+    result = np.resize(x, new_shape).view(get_masked_subclass(x))
+    if result.ndim:
+        result._mask = m
+    return result
+
+
+def ndim(obj):
+    """
+    maskedarray version of the numpy function.
+
+    """
+    return np.ndim(getdata(obj))
+
+ndim.__doc__ = np.ndim.__doc__
+
+
+def shape(obj):
+    "maskedarray version of the numpy function."
+    return np.shape(getdata(obj))
+shape.__doc__ = np.shape.__doc__
+
+
+def size(obj, axis=None):
+    "maskedarray version of the numpy function."
+    return np.size(getdata(obj), axis)
+size.__doc__ = np.size.__doc__
+
+
+def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
+    """
+    Calculate the n-th discrete difference along the given axis.
+    The first difference is given by ``out[i] = a[i+1] - a[i]`` along
+    the given axis; higher differences are calculated by using `diff`
+    recursively. The input mask is preserved.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        The number of times values are differenced. If zero, the input
+        is returned as-is.
+    axis : int, optional
+        The axis along which the difference is taken, default is the
+        last axis.
+    prepend, append : array_like, optional
+        Values to prepend or append to `a` along axis prior to
+        performing the difference.  Scalar values are expanded to
+        arrays with length 1 in the direction of axis and the shape
+        of the input array along all other axes.  Otherwise the
+        dimension and shape must match `a` except along axis.
+
+    Returns
+    -------
+    diff : MaskedArray
+        The n-th differences. The shape of the output is the same as `a`
+        except along `axis` where the dimension is smaller by `n`. The
+        type of the output is the same as the type of the difference
+        between any two elements of `a`. This is the same as the type of
+        `a` in most cases. A notable exception is `datetime64`, which
+        results in a `timedelta64` output array.
+
+    See Also
+    --------
+    numpy.diff : Equivalent function in the top-level NumPy module.
+
+    Notes
+    -----
+    Type is preserved for boolean arrays, so the result will contain
+    `False` when consecutive elements are the same and `True` when they
+    differ.
+
+    For unsigned integer arrays, the results will also be unsigned. This
+    should not be surprising, as the result is consistent with
+    calculating the difference directly:
+
+    >>> u8_arr = np.array([1, 0], dtype=np.uint8)
+    >>> np.ma.diff(u8_arr)
+    masked_array(data=[255],
+                 mask=False,
+           fill_value=np.int64(999999),
+                dtype=uint8)
+    >>> u8_arr[1,...] - u8_arr[0,...]
+    255
+
+    If this is not desirable, then the array should be cast to a larger
+    integer type first:
+
+    >>> i16_arr = u8_arr.astype(np.int16)
+    >>> np.ma.diff(i16_arr)
+    masked_array(data=[-1],
+                 mask=False,
+           fill_value=np.int64(999999),
+                dtype=int16)
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3])
+    >>> x = np.ma.masked_where(a < 2, a)
+    >>> np.ma.diff(x)
+    masked_array(data=[--, 1, 1, 3, --, --, 1],
+                 mask=[ True, False, False, False,  True,  True, False],
+           fill_value=999999)
+
+    >>> np.ma.diff(x, n=2)
+    masked_array(data=[--, 0, 2, --, --, --],
+                 mask=[ True, False, False,  True,  True,  True],
+           fill_value=999999)
+
+    >>> a = np.array([[1, 3, 1, 5, 10], [0, 1, 5, 6, 8]])
+    >>> x = np.ma.masked_equal(a, value=1)
+    >>> np.ma.diff(x)
+    masked_array(
+      data=[[--, --, --, 5],
+            [--, --, 1, 2]],
+      mask=[[ True,  True,  True, False],
+            [ True,  True, False, False]],
+      fill_value=1)
+
+    >>> np.ma.diff(x, axis=0)
+    masked_array(data=[[--, --, --, 1, -2]],
+                 mask=[[ True,  True,  True, False, False]],
+           fill_value=1)
+
+    """
+    if n == 0:
+        return a
+    if n < 0:
+        raise ValueError("order must be non-negative but got " + repr(n))
+
+    a = np.ma.asanyarray(a)
+    if a.ndim == 0:
+        raise ValueError(
+            "diff requires input that is at least one dimensional"
+            )
+
+    combined = []
+    if prepend is not np._NoValue:
+        prepend = np.ma.asanyarray(prepend)
+        if prepend.ndim == 0:
+            shape = list(a.shape)
+            shape[axis] = 1
+            prepend = np.broadcast_to(prepend, tuple(shape))
+        combined.append(prepend)
+
+    combined.append(a)
+
+    if append is not np._NoValue:
+        append = np.ma.asanyarray(append)
+        if append.ndim == 0:
+            shape = list(a.shape)
+            shape[axis] = 1
+            append = np.broadcast_to(append, tuple(shape))
+        combined.append(append)
+
+    if len(combined) > 1:
+        a = np.ma.concatenate(combined, axis)
+
+    # GH 22465 np.diff without prepend/append preserves the mask
+    return np.diff(a, n, axis)
+
+
+##############################################################################
+#                            Extra functions                                 #
+##############################################################################
+
+
+def where(condition, x=_NoValue, y=_NoValue):
+    """
+    Return a masked array with elements from `x` or `y`, depending on condition.
+
+    .. note::
+        When only `condition` is provided, this function is identical to
+        `nonzero`. The rest of this documentation covers only the case where
+        all three arguments are provided.
+
+    Parameters
+    ----------
+    condition : array_like, bool
+        Where True, yield `x`, otherwise yield `y`.
+    x, y : array_like, optional
+        Values from which to choose. `x`, `y` and `condition` need to be
+        broadcastable to some shape.
+
+    Returns
+    -------
+    out : MaskedArray
+        A masked array with `masked` elements where the condition is masked,
+        elements from `x` where `condition` is True, and elements from `y`
+        elsewhere.
+
+    See Also
+    --------
+    numpy.where : Equivalent function in the top-level NumPy module.
+    nonzero : The function that is called when x and y are omitted.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
+    ...                                                    [1, 0, 1],
+    ...
[0, 1, 0]]) + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a list of choices. + + Given an array of integers and a list of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `index` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + indices : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=False, shrink=True) + # Get the choices. 
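+    # For instance, with indices [2, 1, 0] and choices [[1, 1, 1],
+    # [2, 2, 2], [3, 3, 3]] (as in the Examples above), np.choose picks
+    # [3, 2, 1] from the filled data; the combined mask computed above is
+    # then attached to that result.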
+ d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + if out is None: + return np.round(a, decimals, out) + else: + np.round(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out +round = round_ + + +def _mask_propagate(a, axis): + """ + Mask whole 1-d vectors of an array that contain masked values. + """ + a = array(a, subok=False) + m = getmask(a) + if m is nomask or not m.any() or axis is None: + return a + a._mask = a._mask.copy() + axes = normalize_axis_tuple(axis, a.ndim) + for ax in axes: + a._mask |= m.any(axis=ax, keepdims=True) + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. Note that `strict` and `out` are in different positions + than in the method version. In order to maintain compatibility with the + corresponding method, it is recommended that the optional arguments be + treated as keyword only. At some point that may be mandatory. + + Parameters + ---------- + a, b : masked_array_like + Input arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for + the computation. Default is False. Propagating the mask means that + if a masked value appears in a row or column, the whole row or + column is considered masked. + out : masked_array, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + ..
versionadded:: 1.10.2 + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. + + Examples + -------- + >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array( + data=[[21, 26], + [45, 64]], + mask=[[False, False], + [False, False]], + fill_value=999999) + >>> np.ma.dot(a, b, strict=True) + masked_array( + data=[[--, --], + [--, 64]], + mask=[[ True, True], + [ True, False]], + fill_value=999999) + + """ + if strict is True: + if np.ndim(a) == 0 or np.ndim(b) == 0: + pass + elif b.ndim == 1: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 1) + else: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 2) + am = ~getmaskarray(a) + bm = ~getmaskarray(b) + + if out is None: + d = np.dot(filled(a, 0), filled(b, 0)) + m = ~np.dot(am, bm) + if np.ndim(d) == 0: + d = np.asarray(d) + r = d.view(get_masked_subclass(a, b)) + r.__setmask__(m) + return r + else: + d = np.dot(filled(a, 0), filled(b, 0), out._data) + if out.mask.shape != d.shape: + out._mask = np.empty(d.shape, MaskType) + np.dot(am, bm, out._mask) + np.logical_not(out._mask, out._mask) + return out + + +def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent, the product sum is over the last dimension + of a and b. The first argument is not conjugated. + + """ + fa = filled(a, 0) + fb = filled(b, 0) + if fa.ndim == 0: + fa.shape = (1,) + if fb.ndim == 0: + fb.shape = (1,) + return np.inner(fa, fb).view(MaskedArray) +inner.__doc__ = doc_note(np.inner.__doc__, + "Masked values are replaced by 0.") +innerproduct = inner + + +def outer(a, b): + "maskedarray version of the numpy function." + fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = np.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) + return masked_array(d, mask=m) +outer.__doc__ = doc_note(np.outer.__doc__, + "Masked values are replaced by 0.") +outerproduct = outer + + +def _convolve_or_correlate(f, a, v, mode, propagate_mask): + """ + Helper function for ma.correlate and ma.convolve + """ + if propagate_mask: + # results which are contributed to by either item in any pair being invalid + mask = ( + f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) + | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode) + ) + data = f(getdata(a), getdata(v), mode=mode) + else: + # results which are not contributed to by any pair of valid elements + mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode) + data = f(filled(a, 0), filled(v, 0), mode=mode) + + return masked_array(data, mask=mask) + + +def correlate(a, v, mode='valid', propagate_mask=True): + """ + Cross-correlation of two 1-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + propagate_mask : bool + If True, then a result element is masked if any masked element contributes towards it. + If False, then a result element is only masked if no non-masked element + contributes towards it. + + Returns + ------- + out : MaskedArray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + numpy.correlate : Equivalent function in the top-level NumPy module.
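+ + Examples + -------- + An illustrative sketch; the exact repr formatting may vary between + NumPy versions: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) + >>> v = np.ma.array([1, 1]) + >>> np.ma.correlate(a, v, mode='valid') + masked_array(data=[3, 5, --], + mask=[False, False, True], + fill_value=999999) + >>> np.ma.correlate(a, v, mode='valid', propagate_mask=False) + masked_array(data=[3, 5, 3], + mask=[False, False, False], + fill_value=999999)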
+ """ + return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) + + +def convolve(a, v, mode='full', propagate_mask=True): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. + propagate_mask : bool + If True, then if any masked element is included in the sum for a result + element, then the result is masked. + If False, then the result element is only masked if no non-masked cells + contribute towards it. + + Returns + ------- + out : MaskedArray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + numpy.convolve : Equivalent function in the top-level NumPy module. + """ + return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask) + + +def allequal(a, b, fill_value=True): + """ + Return True if all entries of a and b are equal, using + fill_value as a truth value where either or both are masked. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : bool, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + Returns + ------- + y : bool + Returns True if all entries of the two arrays are equal, with + masked entries treated according to `fill_value`; False + otherwise. + + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + + >>> b = np.array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> np.ma.allequal(a, b, fill_value=False) + False + >>> np.ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances.
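+ + For example, under the default tolerances ``1e10`` and ``1.00001e10`` + compare equal: ``absolute(a - b)`` is ``1.0e5``, while + ``atol + rtol * absolute(b)`` is roughly ``1.00001e5``.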
+ + Examples + -------- + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + False + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + """ + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dtype = np.result_type(y, 1.) + if y.dtype != dtype: + y = masked_array(y, dtype=dtype, copy=False) + + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. + if not np.all(xinf == filled(np.isinf(y), False)): + return False + # No infs at all + if not np.any(xinf): + d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), + masked_equal) + return np.all(d) + + if not np.all(filled(x[xinf] == y[xinf], masked_equal)): + return False + x = x[~xinf] + y = y[~xinf] + + d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), + masked_equal) + + return np.all(d) + + +def asarray(a, dtype=None, order=None): + """ + Convert the input to a masked array of the given data-type. + + No copy is performed if the input is already an `ndarray`. If `a` is + a subclass of `MaskedArray`, a base class `MaskedArray` is returned. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to a masked array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists, ndarrays and masked arrays. + dtype : dtype, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Default is 'C'. + + Returns + ------- + out : MaskedArray + Masked array interpretation of `a`. + + See Also + -------- + asanyarray : Similar to `asarray`, but conserves subclasses. + + Examples + -------- + >>> x = np.arange(10.).reshape(2, 5) + >>> x + array([[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]]) + >>> np.ma.asarray(x) + masked_array( + data=[[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]], + mask=False, + fill_value=1e+20) + >>> type(np.ma.asarray(x)) + <class 'numpy.ma.core.MaskedArray'> + + """ + order = order or 'C' + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, + subok=False, order=order) + + +def asanyarray(a, dtype=None): + """ + Convert the input to a masked array, conserving subclasses. + + If `a` is a subclass of `MaskedArray`, its class is conserved. + No copy is performed if the input is already an `ndarray`. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array.
dtype : dtype, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Default is 'C'. + + Returns + ------- + out : MaskedArray + MaskedArray interpretation of `a`. + + See Also + -------- + asarray : Similar to `asanyarray`, but does not conserve subclass. + + Examples + -------- + >>> x = np.arange(10.).reshape(2, 5) + >>> x + array([[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]]) + >>> np.ma.asanyarray(x) + masked_array( + data=[[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]], + mask=False, + fill_value=1e+20) + >>> type(np.ma.asanyarray(x)) + <class 'numpy.ma.core.MaskedArray'> + + """ + # workaround for #8666, to preserve identity. Ideally the bottom line + # would handle this for us. + if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): + return a + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + + +############################################################################## +# Pickling # +############################################################################## + + +def fromfile(file, dtype=float, count=-1, sep=''): + raise NotImplementedError( + "fromfile() not yet implemented for a MaskedArray.") + + +def fromflex(fxarray): + """ + Build a masked array from a suitable flexible-type array. + + The input array has to have a data-type with ``_data`` and ``_mask`` + fields. This type of array is output by `MaskedArray.toflex`. + + Parameters + ---------- + fxarray : ndarray + The structured input array, containing ``_data`` and ``_mask`` + fields. If present, other fields are discarded. + + Returns + ------- + result : MaskedArray + The constructed masked array. + + See Also + -------- + MaskedArray.toflex : Build a flexible-type array from a masked array. + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) + >>> rec = x.toflex() + >>> rec + array([[(0, False), (1, True), (2, False)], + [(3, True), (4, False), (5, True)], + [(6, False), (7, True), (8, False)]], + dtype=[('_data', '<i8'), ('_mask', '?')]) + >>> x2 = np.ma.fromflex(rec) + >>> x2 + masked_array( + data=[[0, --, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + Extra fields can be present in the structured array but are discarded: + + >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')] + >>> rec2 = np.zeros((2, 2), dtype=dt) + >>> rec2 + array([[(0, False, 0.), (0, False, 0.)], + [(0, False, 0.), (0, False, 0.)]], + dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')]) + >>> y = np.ma.fromflex(rec2) + >>> y + masked_array( + data=[[0, 0], + [0, 0]], + mask=[[False, False], + [False, False]], + fill_value=np.int64(999999), + dtype=int32) + + """ + return masked_array(fxarray['_data'], mask=fxarray['_mask']) + + +class _convert2ma: + + """ + Convert functions from numpy to numpy.ma. + + Parameters + ---------- + funcname : str + Name of the function to transform. + + """ + __doc__ = None + + def __init__(self, funcname, np_ret, np_ma_ret, params=None): + self._func = getattr(np, funcname) + self.__doc__ = self.getdoc(np_ret, np_ma_ret) + self._extras = params or {} + + def getdoc(self, np_ret, np_ma_ret): + "Return the doc of the function (from the doc of the method)."
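+ # Pull the wrapped numpy function's docstring, swap the documented + # ndarray return type for its MaskedArray counterpart, and prepend + # the signature.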
+ doc = getattr(self._func, '__doc__', None) + sig = get_object_signature(self._func) + if doc: + doc = self._replace_return_type(doc, np_ret, np_ma_ret) + # Add the signature of the function at the beginning of the doc + if sig: + sig = "%s%s\n" % (self._func.__name__, sig) + doc = sig + doc + return doc + + def _replace_return_type(self, doc, np_ret, np_ma_ret): + """ + Replace documentation of ``np`` function's return type. + + Replaces it with the proper type for the ``np.ma`` function. + + Parameters + ---------- + doc : str + The documentation of the ``np`` method. + np_ret : str + The return type string of the ``np`` method that we want to + replace. (e.g. "out : ndarray") + np_ma_ret : str + The return type string of the ``np.ma`` method. + (e.g. "out : MaskedArray") + """ + if np_ret not in doc: + raise RuntimeError( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{self._func.__name__}`. " + f"Fix the docstring for `np.{self._func.__name__}` or " + "update the expected string for return type." + ) + + return doc.replace(np_ret, np_ma_ret) + + def __call__(self, *args, **params): + # Find the common parameters to the call and the definition + _extras = self._extras + common_params = set(params).intersection(_extras) + # Drop the common parameters from the call + for p in common_params: + _extras[p] = params.pop(p) + # Get the result + result = self._func.__call__(*args, **params).view(MaskedArray) + if "fill_value" in common_params: + result.fill_value = _extras.get("fill_value", None) + if "hardmask" in common_params: + result._hardmask = bool(_extras.get("hard_mask", False)) + return result + + +arange = _convert2ma( + 'arange', + params=dict(fill_value=None, hardmask=False), + np_ret='arange : ndarray', + np_ma_ret='arange : MaskedArray', +) +clip = _convert2ma( + 'clip', + params=dict(fill_value=None, hardmask=False), + np_ret='clipped_array : ndarray', + np_ma_ret='clipped_array : MaskedArray', +) +empty = _convert2ma( + 'empty', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +empty_like = _convert2ma( + 'empty_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +frombuffer = _convert2ma( + 'frombuffer', + np_ret='out : ndarray', + np_ma_ret='out: MaskedArray', +) +fromfunction = _convert2ma( + 'fromfunction', + np_ret='fromfunction : any', + np_ma_ret='fromfunction: MaskedArray', +) +identity = _convert2ma( + 'identity', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +indices = _convert2ma( + 'indices', + params=dict(fill_value=None, hardmask=False), + np_ret='grid : one ndarray or tuple of ndarrays', + np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', +) +ones = _convert2ma( + 'ones', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +ones_like = _convert2ma( + 'ones_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +squeeze = _convert2ma( + 'squeeze', + params=dict(fill_value=None, hardmask=False), + np_ret='squeezed : ndarray', + np_ma_ret='squeezed : MaskedArray', +) +zeros = _convert2ma( + 'zeros', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +zeros_like = _convert2ma( + 'zeros_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) + + +def append(a, b, 
axis=None): + """Append values to the end of an array. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Values are appended to a copy of this array. + b : array_like + These values are appended to a copy of `a`. It must be of the + correct shape (the same shape as `a`, excluding `axis`). If `axis` + is not specified, `b` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `v` are appended. If `axis` is not given, + both `a` and `b` are flattened before use. + + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) + """ + return concatenate([a, b], axis) diff --git a/phivenv/Lib/site-packages/numpy/ma/core.pyi b/phivenv/Lib/site-packages/numpy/ma/core.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f2d3758f095f5c2a57d267e368e430be20ef0a6c --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/core.pyi @@ -0,0 +1,469 @@ +from collections.abc import Callable +from typing import Any, TypeVar +from numpy import ndarray, dtype, float64 + +from numpy import ( + amax as amax, + amin as amin, + bool as bool, + expand_dims as expand_dims, + clip as clip, + indices as indices, + ones_like as ones_like, + squeeze as squeeze, + zeros_like as zeros_like, + angle as angle +) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +__all__: list[str] + +MaskType = bool +nomask: bool + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +def default_fill_value(obj): ... +def minimum_fill_value(obj): ... +def maximum_fill_value(obj): ... +def set_fill_value(a, fill_value): ... +def common_fill_value(a, b): ... +def filled(a, fill_value=...): ... +def getdata(a, subok=...): ... +get_data = getdata + +def fix_invalid(a, mask=..., copy=..., fill_value=...): ... + +class _MaskedUFunc: + f: Any + __doc__: Any + __name__: Any + def __init__(self, ufunc): ... + +class _MaskedUnaryOperation(_MaskedUFunc): + fill: Any + domain: Any + def __init__(self, mufunc, fill=..., domain=...): ... + def __call__(self, a, *args, **kwargs): ... + +class _MaskedBinaryOperation(_MaskedUFunc): + fillx: Any + filly: Any + def __init__(self, mbfunc, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... + def reduce(self, target, axis=..., dtype=...): ... + def outer(self, a, b): ... + def accumulate(self, target, axis=...): ... + +class _DomainedBinaryOperation(_MaskedUFunc): + domain: Any + fillx: Any + filly: Any + def __init__(self, dbfunc, domain, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... 
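+ +# Masked counterparts of the numpy ufuncs; the instances are built in +# core.py from the wrapper classes declared above.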
+ +exp: _MaskedUnaryOperation +conjugate: _MaskedUnaryOperation +sin: _MaskedUnaryOperation +cos: _MaskedUnaryOperation +arctan: _MaskedUnaryOperation +arcsinh: _MaskedUnaryOperation +sinh: _MaskedUnaryOperation +cosh: _MaskedUnaryOperation +tanh: _MaskedUnaryOperation +abs: _MaskedUnaryOperation +absolute: _MaskedUnaryOperation +fabs: _MaskedUnaryOperation +negative: _MaskedUnaryOperation +floor: _MaskedUnaryOperation +ceil: _MaskedUnaryOperation +around: _MaskedUnaryOperation +logical_not: _MaskedUnaryOperation +sqrt: _MaskedUnaryOperation +log: _MaskedUnaryOperation +log2: _MaskedUnaryOperation +log10: _MaskedUnaryOperation +tan: _MaskedUnaryOperation +arcsin: _MaskedUnaryOperation +arccos: _MaskedUnaryOperation +arccosh: _MaskedUnaryOperation +arctanh: _MaskedUnaryOperation + +add: _MaskedBinaryOperation +subtract: _MaskedBinaryOperation +multiply: _MaskedBinaryOperation +arctan2: _MaskedBinaryOperation +equal: _MaskedBinaryOperation +not_equal: _MaskedBinaryOperation +less_equal: _MaskedBinaryOperation +greater_equal: _MaskedBinaryOperation +less: _MaskedBinaryOperation +greater: _MaskedBinaryOperation +logical_and: _MaskedBinaryOperation +alltrue: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation +sometrue: Callable[..., Any] +logical_xor: _MaskedBinaryOperation +bitwise_and: _MaskedBinaryOperation +bitwise_or: _MaskedBinaryOperation +bitwise_xor: _MaskedBinaryOperation +hypot: _MaskedBinaryOperation +divide: _MaskedBinaryOperation +true_divide: _MaskedBinaryOperation +floor_divide: _MaskedBinaryOperation +remainder: _MaskedBinaryOperation +fmod: _MaskedBinaryOperation +mod: _MaskedBinaryOperation + +def make_mask_descr(ndtype): ... +def getmask(a): ... +get_mask = getmask + +def getmaskarray(arr): ... +def is_mask(m): ... +def make_mask(m, copy=..., shrink=..., dtype=...): ... +def make_mask_none(newshape, dtype=...): ... +def mask_or(m1, m2, copy=..., shrink=...): ... +def flatten_mask(mask): ... +def masked_where(condition, a, copy=...): ... +def masked_greater(x, value, copy=...): ... +def masked_greater_equal(x, value, copy=...): ... +def masked_less(x, value, copy=...): ... +def masked_less_equal(x, value, copy=...): ... +def masked_not_equal(x, value, copy=...): ... +def masked_equal(x, value, copy=...): ... +def masked_inside(x, v1, v2, copy=...): ... +def masked_outside(x, v1, v2, copy=...): ... +def masked_object(x, value, copy=..., shrink=...): ... +def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... +def masked_invalid(a, copy=...): ... + +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=...): ... + +masked_print_option: _MaskedPrintOption + +def flatten_structured_array(a): ... + +class MaskedIterator: + ma: Any + dataiter: Any + maskiter: Any + def __init__(self, ma): ... + def __iter__(self): ... + def __getitem__(self, indx): ... + def __setitem__(self, index, value): ... + def __next__(self): ... + +class MaskedArray(ndarray[_ShapeType, _DType_co]): + __array_priority__: Any + def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def view(self, dtype=..., type=..., fill_value=...): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + @property + def dtype(self): ... 
+ @dtype.setter + def dtype(self, dtype): ... + @property + def shape(self): ... + @shape.setter + def shape(self, shape): ... + def __setmask__(self, mask, copy=...): ... + @property + def mask(self): ... + @mask.setter + def mask(self, value): ... + @property + def recordmask(self): ... + @recordmask.setter + def recordmask(self, mask): ... + def harden_mask(self): ... + def soften_mask(self): ... + @property + def hardmask(self): ... + def unshare_mask(self): ... + @property + def sharedmask(self): ... + def shrink_mask(self): ... + @property + def baseclass(self): ... + data: Any + @property + def flat(self): ... + @flat.setter + def flat(self, value): ... + @property + def fill_value(self): ... + @fill_value.setter + def fill_value(self, value=...): ... + get_fill_value: Any + set_fill_value: Any + def filled(self, fill_value=...): ... + def compressed(self): ... + def compress(self, condition, axis=..., out=...): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __ge__(self, other): ... + def __gt__(self, other): ... + def __le__(self, other): ... + def __lt__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __div__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __pow__(self, other): ... + def __rpow__(self, other): ... + def __iadd__(self, other): ... + def __isub__(self, other): ... + def __imul__(self, other): ... + def __idiv__(self, other): ... + def __ifloordiv__(self, other): ... + def __itruediv__(self, other): ... + def __ipow__(self, other): ... + def __float__(self): ... + def __int__(self): ... + @property # type: ignore[misc] + def imag(self): ... + get_imag: Any + @property # type: ignore[misc] + def real(self): ... + get_real: Any + def count(self, axis=..., keepdims=...): ... + def ravel(self, order=...): ... + def reshape(self, *s, **kwargs): ... + def resize(self, newshape, refcheck=..., order=...): ... + def put(self, indices, values, mode=...): ... + def ids(self): ... + def iscontiguous(self): ... + def all(self, axis=..., out=..., keepdims=...): ... + def any(self, axis=..., out=..., keepdims=...): ... + def nonzero(self): ... + def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + def dot(self, b, out=..., strict=...): ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + def cumsum(self, axis=..., dtype=..., out=...): ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + product: Any + def cumprod(self, axis=..., dtype=..., out=...): ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + def anom(self, axis=..., dtype=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def round(self, decimals=..., out=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... + def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... + def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... + def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... + def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... 
+ # NOTE: deprecated + # def tostring(self, fill_value=..., order=...): ... + def max(self, axis=..., out=..., fill_value=..., keepdims=...): ... + def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ... + def partition(self, *args, **kwargs): ... + def argpartition(self, *args, **kwargs): ... + def take(self, indices, axis=..., out=..., mode=...): ... + copy: Any + diagonal: Any + flatten: Any + repeat: Any + squeeze: Any + swapaxes: Any + T: Any + transpose: Any + @property # type: ignore[misc] + def mT(self): ... + def tolist(self, fill_value=...): ... + def tobytes(self, fill_value=..., order=...): ... + def tofile(self, fid, sep=..., format=...): ... + def toflex(self): ... + torecords: Any + def __reduce__(self): ... + def __deepcopy__(self, memo=...): ... + +class mvoid(MaskedArray[_ShapeType, _DType_co]): + def __new__( + self, + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=...): ... + def tolist(self): ... + +def isMaskedArray(x): ... +isarray = isMaskedArray +isMA = isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[Any, dtype[float64]]): + def __new__(cls): ... + __class__: Any + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def __format__(self, format_spec): ... + def __reduce__(self): ... + def __iop__(self, other): ... + __iadd__: Any + __isub__: Any + __imul__: Any + __ifloordiv__: Any + __itruediv__: Any + __ipow__: Any + def copy(self, *args, **kwargs): ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def __setattr__(self, attr, value): ... + +masked: MaskedConstant +masked_singleton: MaskedConstant +masked_array = MaskedArray + +def array( + data, + dtype=..., + copy=..., + order=..., + mask=..., + fill_value=..., + keep_mask=..., + hard_mask=..., + shrink=..., + subok=..., + ndmin=..., +): ... +def is_masked(x): ... + +class _extrema_operation(_MaskedUFunc): + compare: Any + fill_value_func: Any + def __init__(self, ufunc, compare, fill_value): ... + # NOTE: in practice `b` has a default value, but users should + # explicitly provide a value here as the default is deprecated + def __call__(self, a, b): ... + def reduce(self, target, axis=...): ... + def outer(self, a, b): ... + +def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ... + +class _frommethod: + __name__: Any + __doc__: Any + reversed: Any + def __init__(self, methodname, reversed=...): ... + def getdoc(self): ... + def __call__(self, a, *args, **params): ... + +all: _frommethod +anomalies: _frommethod +anom: _frommethod +any: _frommethod +compress: _frommethod +cumprod: _frommethod +cumsum: _frommethod +copy: _frommethod +diagonal: _frommethod +harden_mask: _frommethod +ids: _frommethod +mean: _frommethod +nonzero: _frommethod +prod: _frommethod +product: _frommethod +ravel: _frommethod +repeat: _frommethod +soften_mask: _frommethod +std: _frommethod +sum: _frommethod +swapaxes: _frommethod +trace: _frommethod +var: _frommethod +count: _frommethod +argmin: _frommethod +argmax: _frommethod + +minimum: _extrema_operation +maximum: _extrema_operation + +def take(a, indices, axis=..., out=..., mode=...): ... +def power(a, b, third=...): ... 
+def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... +def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... +def compressed(x): ... +def concatenate(arrays, axis=...): ... +def diag(v, k=...): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a, indices, values, mode=...): ... +def putmask(a, mask, values): ... +def transpose(a, axes=...): ... +def reshape(a, new_shape, order=...): ... +def resize(x, new_shape): ... +def ndim(obj): ... +def shape(obj): ... +def size(obj, axis=...): ... +def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=..., mode=...): ... +def round(a, decimals=..., out=...): ... + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode=..., propagate_mask=...): ... +def convolve(a, v, mode=..., propagate_mask=...): ... +def allequal(a, b, fill_value=...): ... +def allclose(a, b, masked_equal=..., rtol=..., atol=...): ... +def asarray(a, dtype=..., order=...): ... +def asanyarray(a, dtype=...): ... +def fromflex(fxarray): ... + +class _convert2ma: + __doc__: Any + def __init__(self, funcname, params=...): ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +arange: _convert2ma +empty: _convert2ma +empty_like: _convert2ma +frombuffer: _convert2ma +fromfunction: _convert2ma +identity: _convert2ma +ones: _convert2ma +zeros: _convert2ma + +def append(a, b, axis=...): ... +def dot(a, b, strict=..., out=...): ... +def mask_rowcols(a, axis=...): ... diff --git a/phivenv/Lib/site-packages/numpy/ma/extras.py b/phivenv/Lib/site-packages/numpy/ma/extras.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e3fd6d5600488539d982b87f5b77561224a3e0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/extras.py @@ -0,0 +1,2311 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', + 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows', + 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d', + 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows', + 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import itertools +import warnings + +from . import core as ma +from .core import ( + MaskedArray, MAError, add, array, asarray, concatenate, filled, count, + getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, + nomask, ones, sort, zeros, getdata, get_masked_subclass, dot + ) + +import numpy as np +from numpy import ndarray, array as nxarray +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple +from numpy.lib._function_base_impl import _ureduce +from numpy.lib._index_tricks_impl import AxisConcatenator +from numpy._core.numeric import normalize_axis_tuple + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? 
+ + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> a = np.arange(9).reshape((3,3)) + >>> a = np.ma.array(a) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> np.ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> np.ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> np.ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros`, + `numpy.ma.ones`, `numpy.ma.full`), `masked_all` does not initialize the + values of the array, and may therefore be marginally faster. However, + the values stored in the newly allocated array are arbitrary. For + reproducible behavior, be sure to set each element of the array before + reading. + + Examples + -------- + >>> np.ma.masked_all((3, 3)) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) + + The `dtype` parameter defines the underlying data type. + + >>> a = np.ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = np.ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros_like`, + `numpy.ma.ones_like`, `numpy.ma.full_like`), `masked_all_like` does not + initialize the values of the array, and may therefore be marginally + faster. 
However, the values stored in the newly allocated array are + arbitrary. For reproducible behavior, be sure to set each element of the + array before reading. + + Examples + -------- + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> np.ma.masked_all_like(arr) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=np.float64(1e+20), + dtype=float32) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> np.ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction: + """ + Defines a wrapper to adapt NumPy functions to masked arrays. + + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. + + This class should not be used directly. Instead, one of its extensions that + provides support for a specific type of input should be used. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). + + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = ma.get_object_signature(npfunc) + doc = ma.doc_note(doc, "The function is applied to both the _data " + "and the _mask, if any.") + if sig: + sig = self.__name__ + sig + "\n\n" + return sig + doc + return + + def __call__(self, *args, **params): + pass + + +class _fromnxfunction_single(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single array + argument followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + if isinstance(x, ndarray): + _d = func(x.__array__(), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + else: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_seq(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single sequence + of arrays followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + _d = func(tuple([np.asarray(a) for a in x]), *args, **params) + _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_args(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. 
The first non-array-like input marks the beginning of the + arguments that are passed verbatim for both the data and mask calls. + Array arguments are processed independently and the results are + returned in a list. If only one array is found, the return value is + just the processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + if len(arrays) == 1: + return res[0] + return res + + +class _fromnxfunction_allargs(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. Similar to `_fromnxfunction_args` except that all args + are converted to arrays even if they are not so already. This makes + it possible to process scalars as 1-D arrays. Only keyword arguments + are passed through verbatim for the data and mask calls. Arrays + arguments are processed independently and the results are returned + in a list. If only one arg is present, the return value is just the + processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + res = [] + for x in args: + _d = func(np.asarray(x), **params) + _m = func(getmaskarray(x), **params) + res.append(masked_array(_d, mask=_m)) + if len(args) == 1: + return res[0] + return res + + +atleast_1d = _fromnxfunction_allargs('atleast_1d') +atleast_2d = _fromnxfunction_allargs('atleast_2d') +atleast_3d = _fromnxfunction_allargs('atleast_3d') + +vstack = row_stack = _fromnxfunction_seq('vstack') +hstack = _fromnxfunction_seq('hstack') +column_stack = _fromnxfunction_seq('column_stack') +dstack = _fromnxfunction_seq('dstack') +stack = _fromnxfunction_seq('stack') + +hsplit = _fromnxfunction_single('hsplit') + +diagflat = _fromnxfunction_single('diagflat') + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result + # so we force the type to object, and build a list of dtypes. 
We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.prod(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.prod(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + """ + + +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. 
The default, + `axis=None`, will average over all of the elements of the input array. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + Examples + -------- + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.ma.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + masked_array(data=[3.4, 4.4], + mask=[False, False], + fill_value=1e+20) + >>> np.ma.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis. + + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) + + With ``keepdims=True``, the following result has shape (3, 1). 
+ + >>> np.ma.average(x, axis=1, keepdims=True) + masked_array( + data=[[0.5], + [2.5], + [4.5]], + mask=False, + fill_value=1e+20) + """ + a = asarray(a) + m = getmask(a) + + if axis is not None: + axis = normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. + keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = asarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + + if m is not nomask: + wgt = wgt*(~a.mask) + wgt.mask |= a.mask + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + avg = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + .. versionadded:: 1.10.0 + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. 
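+ For example, for ``V = [4, 1, 3, 2]`` (``N = 4``) the sorted copy is + ``Vs = [1, 2, 3, 4]`` and the median is ``(Vs[1] + Vs[2])/2 = 2.5``.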
+ + Examples + -------- + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.median(x) + 2.5 + >>> np.ma.median(x, axis=-1, overwrite_input=True) + masked_array(data=[2.0, 5.0], + mask=[False, False], + fill_value=1e+20) + + """ + if not hasattr(a, 'mask'): + m = np.median(getdata(a, subok=True), axis=axis, + out=out, overwrite_input=overwrite_input, + keepdims=keepdims) + if isinstance(m, np.ndarray) and 1 <= m.ndim: + return masked_array(m, copy=False) + else: + return m + + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _median(a, axis=None, out=None, overwrite_input=False): + # when an unmasked NaN is present return it, so we need to sort the NaN + # values behind the mask + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.inf + else: + fill_value = None + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort(fill_value=fill_value) + else: + a.sort(axis=axis, fill_value=fill_value) + asorted = a + else: + asorted = sort(a, axis=axis, fill_value=fill_value) + + if axis is None: + axis = 0 + else: + axis = normalize_axis_index(axis, asorted.ndim) + + if asorted.shape[axis] == 0: + # for empty axis integer indices fail so use slicing to get same result + # as median (which is mean of empty slice = nan) + indexer = [slice(None)] * asorted.ndim + indexer[axis] = slice(0, 0) + indexer = tuple(indexer) + return np.ma.mean(asorted[indexer], axis=axis, out=out) + + if asorted.ndim == 1: + idx, odd = divmod(count(asorted), 2) + mid = asorted[idx + odd - 1:idx + 1] + if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: + # avoid inf / x = masked + s = mid.sum(out=out) + if not odd: + s = np.true_divide(s, 2., casting='safe', out=out) + s = np.lib._utils_impl._median_nancheck(asorted, s, axis) + else: + s = mid.mean(out=out) + + # if result is masked either the input contained enough + # minimum_fill_value so that it would be the median or all values + # masked + if np.ma.is_masked(s) and not np.all(asorted.mask): + return np.ma.minimum_fill_value(asorted) + return s + + counts = count(asorted, axis=axis, keepdims=True) + h = counts // 2 + + # duplicate high if odd number of elements so mean does nothing + odd = counts % 2 == 1 + l = np.where(odd, h, h-1) + + lh = np.concatenate([l,h], axis=axis) + + # get low and high median + low_high = np.take_along_axis(asorted, lh, axis=axis) + + def replace_masked(s): + # Replace masked entries with minimum_full_value unless it all values + # are masked. This is required as the sort order of values equal or + # larger than the fill value is undefined and a valid value placed + # elsewhere, e.g. [4, --, inf]. + if np.ma.is_masked(s): + rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask + s.data[rep] = np.ma.minimum_fill_value(asorted) + s.mask[rep] = False + + replace_masked(low_high) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum(low_high, axis=axis, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + + s = np.lib._utils_impl._median_nancheck(asorted, s, axis) + else: + s = np.ma.mean(low_high, axis=axis, out=out) + + return s + + +def compress_nd(x, axis=None): + """Suppress slices from multiple dimensions which contain masked values. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. 
+def compress_nd(x, axis=None):
+    """Suppress slices from multiple dimensions which contain masked values.
+
+    Parameters
+    ----------
+    x : array_like, MaskedArray
+        The array to operate on. If not a MaskedArray instance (or if no
+        array elements are masked), `x` is interpreted as a MaskedArray with
+        `mask` set to `nomask`.
+    axis : tuple of ints or int, optional
+        The dimensions to suppress slices from can be selected with this
+        parameter.
+        - If axis is a tuple of ints, those are the axes to suppress
+          slices from.
+        - If axis is an int, then that is the only axis to suppress
+          slices from.
+        - If axis is None, all axes are selected.
+
+    Returns
+    -------
+    compress_array : ndarray
+        The compressed array.
+
+    Examples
+    --------
+    >>> arr = [[1, 2], [3, 4]]
+    >>> mask = [[0, 1], [0, 0]]
+    >>> x = np.ma.array(arr, mask=mask)
+    >>> np.ma.compress_nd(x, axis=0)
+    array([[3, 4]])
+    >>> np.ma.compress_nd(x, axis=1)
+    array([[1],
+           [3]])
+    >>> np.ma.compress_nd(x)
+    array([[3]])
+
+    """
+    x = asarray(x)
+    m = getmask(x)
+    # Set axis to tuple of ints
+    if axis is None:
+        axis = tuple(range(x.ndim))
+    else:
+        axis = normalize_axis_tuple(axis, x.ndim)
+
+    # Nothing is masked: return x
+    if m is nomask or not m.any():
+        return x._data
+    # All is masked: return empty
+    if m.all():
+        return nxarray([])
+    # Filter elements through boolean indexing
+    data = x._data
+    for ax in axis:
+        axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
+        data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
+    return data
+
+
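`compress_nd` above keeps a slice along each selected axis only when its mask is clear everywhere, by reducing the mask over all *other* axes and using the result as a boolean index. The same move by hand on a small 2-D array (values illustrative):

import numpy as np

x = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 0, 0]])
m = np.ma.getmaskarray(x)

keep_rows = ~m.any(axis=1)        # a row survives only if fully unmasked
keep_cols = ~m.any(axis=0)        # likewise for columns
out = x.data[keep_rows][:, keep_cols]
assert (out == np.ma.compress_nd(x)).all()   # array([[4, 6]])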
+def compress_rowcols(x, axis=None):
+    """
+    Suppress the rows and/or columns of a 2-D array that contain
+    masked values.
+
+    The suppression behavior is selected with the `axis` parameter.
+
+    - If axis is None, both rows and columns are suppressed.
+    - If axis is 0, only rows are suppressed.
+    - If axis is 1 or -1, only columns are suppressed.
+
+    Parameters
+    ----------
+    x : array_like, MaskedArray
+        The array to operate on. If not a MaskedArray instance (or if no
+        array elements are masked), `x` is interpreted as a MaskedArray with
+        `mask` set to `nomask`. Must be a 2D array.
+    axis : int, optional
+        Axis along which to perform the operation. Default is None.
+
+    Returns
+    -------
+    compressed_array : ndarray
+        The compressed array.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
+    ...                                                   [1, 0, 0],
+    ...                                                   [0, 0, 0]])
+    >>> x
+    masked_array(
+      data=[[--, 1, 2],
+            [--, 4, 5],
+            [6, 7, 8]],
+      mask=[[ True, False, False],
+            [ True, False, False],
+            [False, False, False]],
+      fill_value=999999)
+
+    >>> np.ma.compress_rowcols(x)
+    array([[7, 8]])
+    >>> np.ma.compress_rowcols(x, 0)
+    array([[6, 7, 8]])
+    >>> np.ma.compress_rowcols(x, 1)
+    array([[1, 2],
+           [4, 5],
+           [7, 8]])
+
+    """
+    if asarray(x).ndim != 2:
+        raise NotImplementedError("compress_rowcols works for 2D arrays only.")
+    return compress_nd(x, axis=axis)
+
+
+def compress_rows(a):
+    """
+    Suppress whole rows of a 2-D array that contain masked values.
+
+    This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
+    `compress_rowcols` for details.
+
+    Parameters
+    ----------
+    x : array_like, MaskedArray
+        The array to operate on. If not a MaskedArray instance (or if no
+        array elements are masked), `x` is interpreted as a MaskedArray with
+        `mask` set to `nomask`. Must be a 2D array.
+
+    Returns
+    -------
+    compressed_array : ndarray
+        The compressed array.
+
+    See Also
+    --------
+    compress_rowcols
+
+    Examples
+    --------
+    >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
+    ...                                                   [1, 0, 0],
+    ...                                                   [0, 0, 0]])
+    >>> np.ma.compress_rows(a)
+    array([[6, 7, 8]])
+
+    """
+    a = asarray(a)
+    if a.ndim != 2:
+        raise NotImplementedError("compress_rows works for 2D arrays only.")
+    return compress_rowcols(a, 0)
+
+
+def compress_cols(a):
+    """
+    Suppress whole columns of a 2-D array that contain masked values.
+
+    This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
+    `compress_rowcols` for details.
+
+    Parameters
+    ----------
+    x : array_like, MaskedArray
+        The array to operate on. If not a MaskedArray instance (or if no
+        array elements are masked), `x` is interpreted as a MaskedArray with
+        `mask` set to `nomask`. Must be a 2D array.
+
+    Returns
+    -------
+    compressed_array : ndarray
+        The compressed array.
+
+    See Also
+    --------
+    compress_rowcols
+
+    Examples
+    --------
+    >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
+    ...                                                   [1, 0, 0],
+    ...                                                   [0, 0, 0]])
+    >>> np.ma.compress_cols(a)
+    array([[1, 2],
+           [4, 5],
+           [7, 8]])
+
+    """
+    a = asarray(a)
+    if a.ndim != 2:
+        raise NotImplementedError("compress_cols works for 2D arrays only.")
+    return compress_rowcols(a, 1)
+
+
+def mask_rowcols(a, axis=None):
+    """
+    Mask rows and/or columns of a 2D array that contain masked values.
+
+    Mask whole rows and/or columns of a 2D array that contain
+    masked values. The masking behavior is selected using the
+    `axis` parameter.
+
+    - If `axis` is None, rows *and* columns are masked.
+    - If `axis` is 0, only rows are masked.
+    - If `axis` is 1 or -1, only columns are masked.
+
+    Parameters
+    ----------
+    a : array_like, MaskedArray
+        The array to mask. If not a MaskedArray instance (or if no array
+        elements are masked), the result is a MaskedArray with `mask` set
+        to `nomask` (False). Must be a 2D array.
+    axis : int, optional
+        Axis along which to perform the operation. If None, applies to
+        both rows and columns.
+
+    Returns
+    -------
+    a : MaskedArray
+        A modified version of the input array, masked depending on the value
+        of the `axis` parameter.
+
+    Raises
+    ------
+    NotImplementedError
+        If input array `a` is not 2D.
+
+    See Also
+    --------
+    mask_rows : Mask rows of a 2D array that contain masked values.
+    mask_cols : Mask columns of a 2D array that contain masked values.
+    masked_where : Mask where a condition is met.
+
+    Notes
+    -----
+    The input array's mask is modified by this function.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 3), dtype=int)
+    >>> a[1, 1] = 1
+    >>> a
+    array([[0, 0, 0],
+           [0, 1, 0],
+           [0, 0, 0]])
+    >>> a = np.ma.masked_equal(a, 1)
+    >>> a
+    masked_array(
+      data=[[0, 0, 0],
+            [0, --, 0],
+            [0, 0, 0]],
+      mask=[[False, False, False],
+            [False,  True, False],
+            [False, False, False]],
+      fill_value=1)
+    >>> np.ma.mask_rowcols(a)
+    masked_array(
+      data=[[0, --, 0],
+            [--, --, --],
+            [0, --, 0]],
+      mask=[[False,  True, False],
+            [ True,  True,  True],
+            [False,  True, False]],
+      fill_value=1)
+
+    """
+    a = array(a, subok=False)
+    if a.ndim != 2:
+        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
+    m = getmask(a)
+    # Nothing is masked: return a
+    if m is nomask or not m.any():
+        return a
+    maskedval = m.nonzero()
+    a._mask = a._mask.copy()
+    if not axis:
+        a[np.unique(maskedval[0])] = masked
+    if axis in [None, 1, -1]:
+        a[:, np.unique(maskedval[1])] = masked
+    return a
+
+
+def mask_rows(a, axis=np._NoValue):
+    """
+    Mask rows of a 2D array that contain masked values.
+
+    This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
+
+    See Also
+    --------
+    mask_rowcols : Mask rows and/or columns of a 2D array.
+ masked_where : Mask where a condition is met. + + Examples + -------- + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + + >>> np.ma.mask_rows(a) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 0) + + +def mask_cols(a, axis=np._NoValue): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_cols(a) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> arr = np.ma.array([1, 2, 4, 7, 0]) + >>> np.ma.ediff1d(arr) + masked_array(data=[ 1, 2, 3, -7], + mask=False, + fill_value=999999) + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. + + See Also + -------- + numpy.unique : Equivalent function for ndarrays. 
+ + Examples + -------- + >>> a = [1, 2, 1000, 2, 3] + >>> mask = [0, 0, 1, 0, 0] + >>> masked_a = np.ma.masked_array(a, mask) + >>> masked_a + masked_array(data=[1, 2, --, 2, 3], + mask=[False, False, True, False, False], + fill_value=999999) + >>> np.ma.unique(masked_a) + masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999) + >>> np.ma.unique(masked_a, return_index=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2])) + >>> np.ma.unique(masked_a, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 3, 1, 2])) + >>> np.ma.unique(masked_a, return_index=True, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2])) + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. + + Examples + -------- + >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) + >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) + >>> np.ma.setxor1d(ar1, ar2) + masked_array(data=[1, 4, 5, 7], + mask=False, + fill_value=999999) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + numpy.in1d : Equivalent function for ndarrays. + + Notes + ----- + .. 
versionadded:: 1.4.0 + + Examples + -------- + >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) + >>> ar2 = [0, 2] + >>> np.ma.in1d(ar1, ar2) + masked_array(data=[ True, False, True, False, True], + mask=False, + fill_value=True) + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.13.0 + + Examples + -------- + >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) + >>> test_elements = [0, 2] + >>> np.ma.isin(element, test_elements) + masked_array(data=[False, True, False, False, False, False], + mask=False, + fill_value=True) + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See Also + -------- + numpy.union1d : Equivalent function for ndarrays. + + Examples + -------- + >>> ar1 = np.ma.array([1, 2, 3, 4]) + >>> ar2 = np.ma.array([3, 4, 5, 6]) + >>> np.ma.union1d(ar1, ar2) + masked_array(data=[1, 2, 3, 4, 5, 6], + mask=False, + fill_value=999999) + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. 
+ + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + xnotmask = np.logical_not(xmask).astype(int) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + .. versionadded:: 1.5 + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. 
+ + See Also + -------- + numpy.cov + + Examples + -------- + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) + >>> np.ma.cov(x, y) + masked_array( + data=[[--, --, --, --], + [--, --, --, --], + [--, --, --, --], + [--, --, --, --]], + mask=[[ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True]], + fill_value=1e+20, + dtype=float64) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof + result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof + result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, + ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Notes + ----- + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + + Examples + -------- + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> np.ma.corrcoef(x) + masked_array( + data=[[--, --], + [--, --]], + mask=[[ True, True], + [ True, True]], + fill_value=1e+20, + dtype=float64) + + """ + msg = 'bias and ddof have no effect and are deprecated' + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn(msg, DeprecationWarning, stacklevel=2) + # Get the data + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + # Compute the covariance matrix + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. + c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. 
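        # Editorial aside, not part of the upstream source: `xnotmask` holds 1
        # where an observation is present, so np.dot(xnotmask, xnotmask.T) is a
        # matrix of *pairwise* observation counts. E.g. with two variables that
        # have 3 and 4 of 4 observations present, the counts are [[3, 3],
        # [3, 4]]: each entry below is normalized by how many observations that
        # pair of variables shares, rather than by a single global N as in the
        # unmasked `numpy.corrcoef`.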
+ c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + # Check whether we have a scalar + try: + diag = ma.diagonal(c) + except ValueError: + return 1 + # + if xnotmask.all(): + _denom = ma.sqrt(ma.multiply.outer(diag, diag)) + else: + _denom = diagflat(diag) + _denom._sharedmask = False # We know return is always a copy + n = x.shape[1 - rowvar] + if rowvar: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + else: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols( + vstack((x[:, i], x[:, j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + return c / _denom + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super().makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super().__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `r_`. + + See Also + -------- + r_ + + Examples + -------- + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) + + """ + def __init__(self): + MAxisConcatenator.__init__(self, 0) + +mr_ = mr_class() + + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def ndenumerate(a, compressed=True): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values, + skipping elements that are masked. With `compressed=False`, + `ma.masked` is yielded as the value of masked elements. This + behavior differs from that of `numpy.ndenumerate`, which yields the + value of the underlying data array. + + Notes + ----- + .. versionadded:: 1.23.0 + + Parameters + ---------- + a : array_like + An array with (possibly) masked elements. + compressed : bool, optional + If True (default), masked elements are skipped. + + See Also + -------- + numpy.ndenumerate : Equivalent function ignoring any mask. + + Examples + -------- + >>> a = np.ma.arange(9).reshape((3, 3)) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> for index, x in np.ma.ndenumerate(a): + ... 
print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 1) 4 + (2, 0) 6 + (2, 2) 8 + + >>> for index, x in np.ma.ndenumerate(a, compressed=False): + ... print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 0) -- + (1, 1) 4 + (1, 2) -- + (2, 0) 6 + (2, 1) -- + (2, 2) 8 + """ + for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat): + if not mask: + yield it + elif not compressed: + yield it[0], masked + + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. + + Examples + -------- + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_edges(a) + array([3, 8]) + + >>> a[:] = np.ma.masked + >>> print(np.ma.flatnotmasked_edges(a)) + None + + """ + m = getmask(a) + if m is nomask or not np.any(m): + return np.array([0, a.size - 1]) + unmasked = np.flatnonzero(~m) + if len(unmasked) > 0: + return unmasked[[0, -1]] + else: + return None + + +def notmasked_edges(a, axis=None): + """ + Find the indices of the first and last unmasked values along an axis. + + If all values are masked, return None. Otherwise, return a list + of two tuples, corresponding to the indices of the first and last + unmasked values respectively. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array. + + Returns + ------- + edges : ndarray or list + An array of start and end indexes if there are any masked data in + the array. If there are no masked data in the array, `edges` is a + list of the first and last index. + + See Also + -------- + flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous + clump_masked, clump_unmasked + + Examples + -------- + >>> a = np.arange(9).reshape((3, 3)) + >>> m = np.zeros_like(a) + >>> m[1:, 1:] = 1 + + >>> am = np.ma.array(a, mask=m) + >>> np.array(am[~am.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.notmasked_edges(am) + array([0, 6]) + + """ + a = asarray(a) + if axis is None or a.ndim == 1: + return flatnotmasked_edges(a) + m = getmaskarray(a) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) + return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), + tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] + + +def flatnotmasked_contiguous(a): + """ + Find contiguous unmasked data in a masked array. + + Parameters + ---------- + a : array_like + The input array. + + Returns + ------- + slice_list : list + A sorted sequence of `slice` objects (start index, end index). + + .. versionchanged:: 1.15.0 + Now returns an empty list instead of None for a fully masked array + + See Also + -------- + flatnotmasked_edges, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. 
+ + Examples + -------- + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_contiguous(a) + [slice(0, 10, None)] + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_contiguous(a) + [slice(3, 5, None), slice(6, 9, None)] + >>> a[:] = np.ma.masked + >>> np.ma.flatnotmasked_contiguous(a) + [] + + """ + m = getmask(a) + if m is nomask: + return [slice(0, a.size)] + i = 0 + result = [] + for (k, g) in itertools.groupby(m.ravel()): + n = len(list(g)) + if not k: + result.append(slice(i, i + n)) + i += n + return result + + +def notmasked_contiguous(a, axis=None): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array, and this + is the same as `flatnotmasked_contiguous`. + + Returns + ------- + endpoints : list + A list of slices (start and end indexes) of unmasked indexes + in the array. + + If the input is 2d and axis is specified, the result is a list of lists. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> a = np.arange(12).reshape((3, 4)) + >>> mask = np.zeros_like(a) + >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 + >>> ma = np.ma.array(a, mask=mask) + >>> ma + masked_array( + data=[[0, --, 2, 3], + [--, --, --, 7], + [8, --, --, 11]], + mask=[[False, True, False, False], + [ True, True, True, False], + [False, True, True, False]], + fill_value=999999) + >>> np.array(ma[~ma.mask]) + array([ 0, 2, 3, 7, 8, 11]) + + >>> np.ma.notmasked_contiguous(ma) + [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] + + >>> np.ma.notmasked_contiguous(ma, axis=0) + [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] + + >>> np.ma.notmasked_contiguous(ma, axis=1) + [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] + + """ + a = asarray(a) + nd = a.ndim + if nd > 2: + raise NotImplementedError("Currently limited to at most 2D array.") + if axis is None or nd == 1: + return flatnotmasked_contiguous(a) + # + result = [] + # + other = (axis + 1) % 2 + idx = [0, 0] + idx[axis] = slice(None, None) + # + for i in range(a.shape[other]): + idx[other] = i + result.append(flatnotmasked_contiguous(a[tuple(idx)])) + return result + + +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] ^ mask[:-1]).nonzero() + idx = idx[0] + 1 + + if mask[0]: + if len(idx) == 0: + return [slice(0, mask.size)] + + r = [slice(0, idx[0])] + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) + else: + if len(idx) == 0: + return [] + + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + + if mask[-1]: + r.append(slice(idx[-1], mask.size)) + return r + + +def clump_unmasked(a): + """ + Return list of slices corresponding to the unmasked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. 
+ + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of unmasked + elements in `a`. + + Notes + ----- + .. versionadded:: 1.4.0 + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + notmasked_contiguous, clump_masked + + Examples + -------- + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.clump_unmasked(a) + [slice(3, 6, None), slice(7, 8, None)] + + """ + mask = getattr(a, '_mask', nomask) + if mask is nomask: + return [slice(0, a.size)] + return _ezclump(~mask) + + +def clump_masked(a): + """ + Returns a list of slices corresponding to the masked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. + + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of masked elements + in `a`. + + Notes + ----- + .. versionadded:: 1.4.0 + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + notmasked_contiguous, clump_unmasked + + Examples + -------- + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.clump_masked(a) + [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] + + """ + mask = ma.getmask(a) + if mask is nomask: + return [] + return _ezclump(mask) + + +############################################################################### +# Polynomial fit # +############################################################################### + + +def vander(x, n=None): + """ + Masked values in the input array result in rows of zeros. + + """ + _vander = np.vander(x, n) + m = getmask(x) + if m is not nomask: + _vander[m] = 0 + return _vander + +vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) + + +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Any masked values in x is propagated in y, and vice-versa. + + """ + x = asarray(x) + y = asarray(y) + + m = getmask(x) + if y.ndim == 1: + m = mask_or(m, getmask(y)) + elif y.ndim == 2: + my = getmask(mask_rows(y)) + if my is not nomask: + m = mask_or(m, my[:, 0]) + else: + raise TypeError("Expected a 1D or 2D array for y!") + + if w is not None: + w = asarray(w) + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + m = mask_or(m, getmask(w)) + + if m is not nomask: + not_m = ~m + if w is not None: + w = w[not_m] + return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov) + else: + return np.polyfit(x, y, deg, rcond, full, w, cov) + +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/phivenv/Lib/site-packages/numpy/ma/extras.pyi b/phivenv/Lib/site-packages/numpy/ma/extras.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1b31bff95c16280e1705c69880a161178460923f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/extras.pyi @@ -0,0 +1,86 @@ +from typing import Any + +from numpy.lib._index_tricks_impl import AxisConcatenator + +from numpy.ma.core import ( + dot as dot, + mask_rowcols as mask_rowcols, +) + +__all__: list[str] + +def count_masked(arr, axis=...): ... +def masked_all(shape, dtype = ...): ... +def masked_all_like(arr): ... + +class _fromnxfunction: + __name__: Any + __doc__: Any + def __init__(self, funcname): ... + def getdoc(self): ... 
+    def __call__(self, *args, **params): ...
+
+class _fromnxfunction_single(_fromnxfunction):
+    def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_seq(_fromnxfunction):
+    def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_allargs(_fromnxfunction):
+    def __call__(self, *args, **params): ...
+
+atleast_1d: _fromnxfunction_allargs
+atleast_2d: _fromnxfunction_allargs
+atleast_3d: _fromnxfunction_allargs
+
+vstack: _fromnxfunction_seq
+row_stack: _fromnxfunction_seq
+hstack: _fromnxfunction_seq
+column_stack: _fromnxfunction_seq
+dstack: _fromnxfunction_seq
+stack: _fromnxfunction_seq
+
+hsplit: _fromnxfunction_single
+diagflat: _fromnxfunction_single
+
+def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
+def apply_over_axes(func, a, axes): ...
+def average(a, axis=..., weights=..., returned=..., keepdims=...): ...
+def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
+def compress_nd(x, axis=...): ...
+def compress_rowcols(x, axis=...): ...
+def compress_rows(a): ...
+def compress_cols(a): ...
+def mask_rows(a, axis = ...): ...
+def mask_cols(a, axis = ...): ...
+def ediff1d(arr, to_end=..., to_begin=...): ...
+def unique(ar1, return_index=..., return_inverse=...): ...
+def intersect1d(ar1, ar2, assume_unique=...): ...
+def setxor1d(ar1, ar2, assume_unique=...): ...
+def in1d(ar1, ar2, assume_unique=..., invert=...): ...
+def isin(element, test_elements, assume_unique=..., invert=...): ...
+def union1d(ar1, ar2): ...
+def setdiff1d(ar1, ar2, assume_unique=...): ...
+def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ...
+
+class MAxisConcatenator(AxisConcatenator):
+    concatenate: Any
+    @classmethod
+    def makemat(cls, arr): ...
+    def __getitem__(self, key): ...
+
+class mr_class(MAxisConcatenator):
+    def __init__(self): ...
+
+mr_: mr_class
+
+def ndenumerate(a, compressed=...): ...
+def flatnotmasked_edges(a): ...
+def notmasked_edges(a, axis=...): ...
+def flatnotmasked_contiguous(a): ...
+def notmasked_contiguous(a, axis=...): ...
+def clump_unmasked(a): ...
+def clump_masked(a): ...
+def vander(x, n=...): ...
+def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
diff --git a/phivenv/Lib/site-packages/numpy/ma/mrecords.py b/phivenv/Lib/site-packages/numpy/ma/mrecords.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb772b6e52fbf62436afad60adc7eb27e6f16a80
--- /dev/null
+++ b/phivenv/Lib/site-packages/numpy/ma/mrecords.py
@@ -0,0 +1,782 @@
+""":mod:`numpy.ma.mrecords`
+
+Defines the equivalent of :class:`numpy.recarray` for masked arrays,
+where fields can be accessed as attributes.
+Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
+and the masking of individual fields.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
+# or any other restricted keyword. One idea would be to not bother in the
+# first place, and then rename the invalid fields with a trailing
+# underscore. Maybe we could just overload the parser function ?
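The module implements record arrays whose mask is itself a record of booleans, one flag per field per row; `ma.make_mask_descr` derives that boolean dtype from the data dtype. A quick sketch of the relationship (field names and values are illustrative):

import numpy as np
import numpy.ma as ma

dt = np.dtype([('age', int), ('name', 'U8')])
print(ma.make_mask_descr(dt))
# dtype([('age', '?'), ('name', '?')]) -- one bool per field

rec = ma.array([(25, 'ann'), (31, 'bob')], dtype=dt)
rec['age'][1] = ma.masked      # plain MaskedArray already masks per field
print(rec['age'])              # [25 --]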
+ +from numpy.ma import ( + MAError, MaskedArray, masked, nomask, masked_array, getdata, + getmaskarray, filled +) +import numpy.ma as ma +import warnings + +import numpy as np +from numpy import dtype, ndarray, array as narray + +from numpy._core.records import ( + recarray, fromarrays as recfromarrays, fromrecords as recfromrecords +) + +_byteorderconv = np._core.records._byteorderconv + + +_check_fill_value = ma.core._check_fill_value + + +__all__ = [ + 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', + 'fromtextfile', 'addfield', +] + +reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] + + +def _checknames(descr, names=None): + """ + Checks that field names ``descr`` are not reserved keywords. + + If this is the case, a default 'f%i' is substituted. If the argument + `names` is not None, updates the field names to valid names. + + """ + ndescr = len(descr) + default_names = ['f%i' % i for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError(f'illegal input names {names!r}') + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(MaskedArray): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. + _mask : boolean array + Mask of the records. A record is masked when all its fields are + masked. + _fieldmask : boolean recarray + Record array of booleans, setting the mask of each individual field + of each record. + _fill_value : record + Filling values for each field. + + """ + + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + + self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + + mdtype = ma.make_mask_descr(self.dtype) + if mask is nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." 
+                raise MAError(msg % (nd, nm))
+            if not keep_mask:
+                self.__setmask__(mask)
+                self._sharedmask = True
+            else:
+                if mask.dtype == mdtype:
+                    _mask = mask
+                else:
+                    _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+                                     dtype=mdtype)
+                self._mask = _mask
+        return self
+
+    def __array_finalize__(self, obj):
+        # Make sure we have a _fieldmask by default
+        _mask = getattr(obj, '_mask', None)
+        if _mask is None:
+            objmask = getattr(obj, '_mask', nomask)
+            _dtype = ndarray.__getattribute__(self, 'dtype')
+            if objmask is nomask:
+                _mask = ma.make_mask_none(self.shape, dtype=_dtype)
+            else:
+                mdescr = ma.make_mask_descr(_dtype)
+                _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
+                               dtype=mdescr).view(recarray)
+        # Update some of the attributes
+        _dict = self.__dict__
+        _dict.update(_mask=_mask)
+        self._update_from(obj)
+        if _dict['_baseclass'] == ndarray:
+            _dict['_baseclass'] = recarray
+        return
+
+    @property
+    def _data(self):
+        """
+        Returns the data as a recarray.
+
+        """
+        return ndarray.view(self, recarray)
+
+    @property
+    def _fieldmask(self):
+        """
+        Alias to mask.
+
+        """
+        return self._mask
+
+    def __len__(self):
+        """
+        Returns the length.
+
+        """
+        # We have more than one record
+        if self.ndim:
+            return len(self._data)
+        # We have only one record: return the number of fields
+        return len(self.dtype)
+
+    def __getattribute__(self, attr):
+        try:
+            return object.__getattribute__(self, attr)
+        except AttributeError:
+            # attr must be a fieldname
+            pass
+        fielddict = ndarray.__getattribute__(self, 'dtype').fields
+        try:
+            res = fielddict[attr][:2]
+        except (TypeError, KeyError) as e:
+            raise AttributeError(
+                f'record array has no attribute {attr}') from e
+        # So far, so good
+        _localdict = ndarray.__getattribute__(self, '__dict__')
+        _data = ndarray.view(self, _localdict['_baseclass'])
+        obj = _data.getfield(*res)
+        if obj.dtype.names is not None:
+            raise NotImplementedError("MaskedRecords is currently limited to "
+                                      "simple records.")
+        # Get some special attributes
+        # Reset the object's mask
+        hasmasked = False
+        _mask = _localdict.get('_mask', None)
+        if _mask is not None:
+            try:
+                _mask = _mask[attr]
+            except IndexError:
+                # Couldn't find a mask: use the default (nomask)
+                pass
+            tp_len = len(_mask.dtype)
+            hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
+        if (obj.shape or hasmasked):
+            obj = obj.view(MaskedArray)
+            obj._baseclass = ndarray
+            obj._isfield = True
+            obj._mask = _mask
+            # Reset the field values
+            _fill_value = _localdict.get('_fill_value', None)
+            if _fill_value is not None:
+                try:
+                    obj._fill_value = _fill_value[attr]
+                except ValueError:
+                    obj._fill_value = None
+        else:
+            obj = obj.item()
+        return obj
+
+    def __setattr__(self, attr, val):
+        """
+        Sets the attribute attr to the value val.
+
+        """
+        # Should we call __setmask__ first ?
+        if attr in ['mask', 'fieldmask']:
+            self.__setmask__(val)
+            return
+        # Create a shortcut (so that we don't have to call getattr all the time)
+        _localdict = object.__getattribute__(self, '__dict__')
+        # Check whether we're creating a new field
+        newattr = attr not in _localdict
+        try:
+            # Is attr a generic attribute ?
+ ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + raise + else: + # Get the list of names + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + + if val is masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = filled(val) + mval = getmaskarray(val) + obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. + + """ + _localdict = self.__dict__ + _mask = ndarray.__getattribute__(self, '_mask') + _data = ndarray.view(self, _localdict['_baseclass']) + # We want a field + if isinstance(indx, str): + # Make sure _sharedmask is True to propagate back to _fieldmask + # Don't use _set_mask, there are some copies being made that + # break propagation Don't force the mask to nomask, that wreaks + # easy masking + obj = _data[indx].view(MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return masked + return obj + # We want some elements. + # First, the data. + obj = np.asarray(_data[indx]).view(mrecarray) + obj._mask = np.asarray(_mask[indx]).view(recarray) + return obj + + def __setitem__(self, indx, value): + """ + Sets the given record to value. + + """ + MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, str): + self._mask[indx] = ma.getmaskarray(value) + + def __str__(self): + """ + Calculates the string representation. + + """ + if self.size > 1: + mstr = [f"({','.join([str(i) for i in s])})" + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return f"[{', '.join(mstr)}]" + else: + mstr = [f"{','.join([str(i) for i in s])}" + for s in zip([getattr(self, f) for f in self.dtype.names])] + return f"({', '.join(mstr)})" + + def __repr__(self): + """ + Calculates the repr representation. + + """ + _names = self.dtype.names + fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) + + def view(self, dtype=None, type=None): + """ + Returns a view of the mrecarray. + + """ + # OK, basic copy-paste from MaskedArray.view. + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + # Here again. 
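        # Editorial aside, not part of the upstream source: the branches here
        # mirror ndarray.view's signatures view(), view(type), view(dtype) and
        # view(dtype, type). issubclass() raises TypeError when `dtype` is a
        # dtype-like rather than a class, so the except branch below handles
        # the view(dtype) case, falling back to a MaskedArray-based class for
        # unstructured dtypes and clearing the stale fill value otherwise.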
+ elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype) + # OK, there's the change + except TypeError: + dtype = np.dtype(dtype) + # we need to revert to MaskedArray, but keeping the possibility + # of subclasses (eg, TimeSeriesRecords), so we'll force a type + # set to the first parent + if dtype.fields is None: + basetype = self.__class__.__bases__[0] + output = self.__array__().view(dtype, basetype) + output._update_from(self) + else: + output = ndarray.view(self, dtype) + output._fill_value = None + else: + output = ndarray.view(self, dtype, type) + # Update the mask, just like in MaskedArray.view + if (getattr(output, '_mask', nomask) is not nomask): + mdtype = ma.make_mask_descr(output.dtype) + output._mask = self._mask.view(mdtype, ndarray) + output._mask.shape = output.shape + return output + + def harden_mask(self): + """ + Forces the mask to hard. + + """ + self._hardmask = True + + def soften_mask(self): + """ + Forces the mask to soft + + """ + self._hardmask = False + + def copy(self): + """ + Returns a copy of the masked record. + + """ + copied = self._data.copy().view(type(self)) + copied._mask = self._mask.copy() + return copied + + def tolist(self, fill_value=None): + """ + Return the data portion of the array as a list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to fill_value. If fill_value is None, + the corresponding entries in the output list will be ``None``. + + """ + if fill_value is not None: + return self.filled(fill_value).tolist() + result = narray(self.filled().tolist(), dtype=object) + mask = narray(self._mask.tolist()) + result[mask] = None + return result.tolist() + + def __getstate__(self): + """Return the internal state of the masked array. + + This is for pickling. + + """ + state = (1, + self.shape, + self.dtype, + self.flags.fnc, + self._data.tobytes(), + self._mask.tobytes(), + self._fill_value, + ) + return state + + def __setstate__(self, state): + """ + Restore the internal state of the masked array. + + This is for pickling. ``state`` is typically the output of the + ``__getstate__`` output, and is a 5-tuple: + + - class name + - a tuple giving the shape of the data + - a typecode for the data + - a binary string for the data + - a binary string for the mask. + + """ + (ver, shp, typ, isf, raw, msk, flv) = state + ndarray.__setstate__(self, (shp, typ, isf, raw)) + mdtype = dtype([(k, np.bool) for (k, _) in self.dtype.descr]) + self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) + self.fill_value = flv + + def __reduce__(self): + """ + Return a 3-tuple for pickling a MaskedArray. + + """ + return (_mrreconstruct, + (self.__class__, self._baseclass, (0,), 'b',), + self.__getstate__()) + + +def _mrreconstruct(subtype, baseclass, baseshape, basetype,): + """ + Build a new MaskedArray from the information stored in a pickle. 
+ + """ + _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) + _mask = ndarray.__new__(ndarray, baseshape, 'b1') + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + +mrecarray = MaskedRecords + + +############################################################################### +# Constructors # +############################################################################### + + +def fromarrays(arraylist, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, + fill_value=None): + """ + Creates a mrecarray from a (flat) list of masked arrays. + + Parameters + ---------- + arraylist : sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None, integer}, optional + Number of records. If None, shape is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + datalist = [getdata(x) for x in arraylist] + masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] + _array = recfromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=nomask): + """ + Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records. 
+ if isinstance(reclist, ndarray): + # Make sure we don't have some hidden mask + if isinstance(reclist, MaskedArray): + reclist = reclist.filled().view(ndarray) + # Grab the initial dtype, just in case + if dtype is None: + dtype = reclist.dtype + reclist = reclist.tolist() + mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, + aligned=aligned, byteorder=byteorder).view(mrecarray) + # Set the fill_value if needed + if fill_value is not None: + mrec.fill_value = fill_value + # Now, let's deal w/ the mask + if mask is not nomask: + mask = np.asarray(mask) + maskrecordlength = len(mask.dtype) + if maskrecordlength: + mrec._mask.flat = mask + elif mask.ndim == 2: + mrec._mask.flat = [tuple(m) for m in mask] + else: + mrec.__setmask__(mask) + if _mask is not None: + mrec._mask[:] = _mask + return mrec + + +def _guessvartypes(arr): + """ + Tries to guess the dtypes of the str_ ndarray `arr`. + + Guesses by testing element-wise conversion. Returns a list of dtypes. + The array is first converted to ndarray. If the array is 2D, the test + is performed on the first line. An exception is raised if the file is + 3D or more. + + """ + vartypes = [] + arr = np.asarray(arr) + if arr.ndim == 2: + arr = arr[0] + elif arr.ndim > 2: + raise ValueError("The array should be 2D at most!") + # Start the conversion loop. + for f in arr: + try: + int(f) + except (ValueError, TypeError): + try: + float(f) + except (ValueError, TypeError): + try: + complex(f) + except (ValueError, TypeError): + vartypes.append(arr.dtype) + else: + vartypes.append(np.dtype(complex)) + else: + vartypes.append(np.dtype(float)) + else: + vartypes.append(np.dtype(int)) + return vartypes + + +def openfile(fname): + """ + Opens the file handle of file `fname`. + + """ + # A file handle + if hasattr(fname, 'readline'): + return fname + # Try to open the file and guess its type + try: + f = open(fname) + except FileNotFoundError as e: + raise FileNotFoundError(f"No such file: '{fname}'") from e + if f.readline()[:2] != "\\x": + f.seek(0, 0) + return f + f.close() + raise NotImplementedError("Wow, binary file") + + +def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', + varnames=None, vartypes=None, + *, delimitor=np._NoValue): # backwards compatibility + """ + Creates a mrecarray from data stored in the file `filename`. + + Parameters + ---------- + fname : {file name/handle} + Handle of an opened file. + delimiter : {None, string}, optional + Alphanumeric character used to separate columns in the file. + If None, any (group of) white spacestring(s) will be used. + commentchar : {'#', string}, optional + Alphanumeric character used to mark the start of a comment. + missingchar : {'', string}, optional + String indicating missing data, and used to create the masks. + varnames : {None, sequence}, optional + Sequence of the variable names. If None, a list will be created from + the first non empty line of the file. + vartypes : {None, sequence}, optional + Sequence of the variables dtypes. If None, it will be estimated from + the first non-commented line. 
+
+
+    Ultra simple: the varnames are in the header, on one line."""
+    if delimitor is not np._NoValue:
+        if delimiter is not None:
+            raise TypeError("fromtextfile() got multiple values for argument "
+                            "'delimiter'")
+        # NumPy 1.22.0, 2021-09-23
+        warnings.warn("The 'delimitor' keyword argument of "
+                      "numpy.ma.mrecords.fromtextfile() is deprecated "
+                      "since NumPy 1.22.0, use 'delimiter' instead.",
+                      DeprecationWarning, stacklevel=2)
+        delimiter = delimitor
+
+    # Try to open the file.
+    ftext = openfile(fname)
+
+    # Get the first non-empty line as the varnames
+    while True:
+        line = ftext.readline()
+        firstline = line[:line.find(commentchar)].strip()
+        _varnames = firstline.split(delimiter)
+        if len(_varnames) > 1:
+            break
+    if varnames is None:
+        varnames = _varnames
+
+    # Get the data.
+    _variables = masked_array([line.strip().split(delimiter) for line in ftext
+                               if line[0] != commentchar and len(line) > 1])
+    (_, nfields) = _variables.shape
+    ftext.close()
+
+    # Try to guess the dtype.
+    if vartypes is None:
+        vartypes = _guessvartypes(_variables[0])
+    else:
+        vartypes = [np.dtype(v) for v in vartypes]
+        if len(vartypes) != nfields:
+            msg = "Attempting to set %i dtypes for %i fields!"
+            msg += " Reverting to default."
+            warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
+            vartypes = _guessvartypes(_variables[0])
+
+    # Construct the descriptor.
+    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
+    mfillv = [ma.default_fill_value(f) for f in vartypes]
+
+    # Get the data and the mask.
+    # We just need a list of masked_arrays. It's easier to create it like that:
+    _mask = (_variables.T == missingchar)
+    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
+                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
+
+    return fromarrays(_datalist, dtype=mdescr)
+
+
+def addfield(mrecord, newfield, newfieldname=None):
+    """Adds a new field to the masked record array.
+
+    Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
+    is None, the new field name is set to 'fi', where `i` is the number of
+    existing fields.
+
+    """
+    _data = mrecord._data
+    _mask = mrecord._mask
+    if newfieldname is None or newfieldname in reserved_fields:
+        newfieldname = 'f%i' % len(_data.dtype)
+    newfield = ma.array(newfield)
+    # Get the new data.
+ # Create a new empty recarray + newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) + newdata = recarray(_data.shape, newdtype) + # Add the existing field + [newdata.setfield(_data.getfield(*f), *f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask + # Create a new empty recarray + newmdtype = np.dtype([(n, np.bool) for n in newdtype.names]) + newmask = recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/phivenv/Lib/site-packages/numpy/ma/mrecords.pyi b/phivenv/Lib/site-packages/numpy/ma/mrecords.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2f210ff9767329524af818dd3d033ffbcb3ce9b1 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/mrecords.pyi @@ -0,0 +1,90 @@ +from typing import Any, TypeVar + +from numpy import dtype +from numpy.ma import MaskedArray + +__all__: list[str] + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +class MaskedRecords(MaskedArray[_ShapeType, _DType_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=..., type=...): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=...): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., +): ... + +def fromrecords( + reclist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., + mask=..., +): ... + +def fromtextfile( + fname, + delimiter=..., + commentchar=..., + missingchar=..., + varnames=..., + vartypes=..., + # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 + # delimitor=..., +): ... + +def addfield(mrecord, newfield, newfieldname=...): ... 
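The stubs above mirror the runtime module, so the public surface of numpy.ma.mrecords is fully visible at this point in the diff: the MaskedRecords class plus the fromarrays/fromrecords/fromtextfile constructors and addfield. A minimal usage sketch, not part of the vendored sources, assuming a NumPy build where numpy.ma.mrecords imports cleanly:

    import numpy.ma as ma
    from numpy.ma import mrecords

    # Two masked columns become one masked record array; each field
    # keeps its own mask.
    x = ma.array([1, 2, 3], mask=[0, 1, 0])
    y = ma.array([1.5, 2.5, 3.5], mask=[1, 0, 0])
    rec = mrecords.fromarrays([x, y], names='a,b')
    print(rec['a'])          # [1 -- 3]
    print(rec['b'])          # [-- 2.5 3.5]

    # addfield returns a new MaskedRecords with one extra field.
    z = ma.array([10, 20, 30], mask=[0, 0, 1])
    rec2 = mrecords.addfield(rec, z, newfieldname='c')
    print(rec2.dtype.names)  # ('a', 'b', 'c')
    print(rec2['c'])         # [10 20 --]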
diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__init__.py b/phivenv/Lib/site-packages/numpy/ma/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46807d268ad35bee1fb18bbd33cdccd7697936cf Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f43ad863917a341d764ddc67e774107b51890c14 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_arrayobject.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53d9296ec14501be56d0cac4ab183b90c27d302d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71de6dbd6f25711554d2bdf46dc6b2ee6bc852a7 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea065858fe259964ac12b46f8210045be6c8131d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49289854adc362e78aff906e10f8498f3ae09f78 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62c7b47ce3c30743e60631d00a7beec4670bf9f9 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33afb6193845864736bcf6b648db40cc2612f3e8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc differ diff --git 
a/phivenv/Lib/site-packages/numpy/ma/tests/test_arrayobject.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_arrayobject.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2d98d6b43e9bfb8cd43a1b7971a77fb2bf3dac --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_arrayobject.py @@ -0,0 +1,40 @@ +import pytest + +import numpy as np +from numpy.ma import masked_array +from numpy.testing import assert_array_equal + + +def test_matrix_transpose_raises_error_for_1d(): + msg = "matrix transpose with ndim < 2 is undefined" + ma_arr = masked_array(data=[1, 2, 3, 4, 5, 6], + mask=[1, 0, 1, 1, 1, 0]) + with pytest.raises(ValueError, match=msg): + ma_arr.mT + + +def test_matrix_transpose_equals_transpose_2d(): + ma_arr = masked_array(data=[[1, 2, 3], [4, 5, 6]], + mask=[[1, 0, 1], [1, 1, 0]]) + assert_array_equal(ma_arr.T, ma_arr.mT) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + + rng = np.random.default_rng(42) + mask = rng.choice([0, 1], size=shape) + ma_arr = masked_array(data=arr, mask=mask) + + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + assert_array_equal(tgt, ma_arr.mT) diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/test_core.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_core.py new file mode 100644 index 0000000000000000000000000000000000000000..7ecfbaf6970759e8be955699eb11fb0a37f2bafd --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_core.py @@ -0,0 +1,5702 @@ +# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 +"""Tests suite for MaskedArray & subclassing. 
+ +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +__author__ = "Pierre GF Gerard-Marchant" + +import sys +import warnings +import copy +import operator +import itertools +import textwrap +import pickle +from functools import reduce + +import pytest + +import numpy as np +import numpy.ma.core +import numpy._core.fromnumeric as fromnumeric +import numpy._core.umath as umath +from numpy.exceptions import AxisError +from numpy.testing import ( + assert_raises, assert_warns, suppress_warnings, IS_WASM + ) +from numpy.testing._private.utils import requires_memory +from numpy import ndarray +from numpy._utils import asbytes +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal, + assert_equal_records, fail_if_equal, assert_not_equal, + assert_mask_equal + ) +from numpy.ma.core import ( + MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, + allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, + arcsin, arctan, argsort, array, asarray, choose, concatenate, + conjugate, cos, cosh, count, default_fill_value, diag, divide, doc_note, + empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, + flatten_structured_array, fromflex, getmask, getmaskarray, greater, + greater_equal, identity, inner, isMaskedArray, less, less_equal, log, + log10, make_mask, make_mask_descr, mask_or, masked, masked_array, + masked_equal, masked_greater, masked_greater_equal, masked_inside, + masked_less, masked_less_equal, masked_not_equal, masked_outside, + masked_print_option, masked_values, masked_where, max, maximum, + maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, + mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put, + putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, + sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like, + ) + +pi = np.pi + + +suppress_copy_mask_on_assignment = suppress_warnings() +suppress_copy_mask_on_assignment.filter( + numpy.ma.core.MaskedArrayFutureWarning, + "setting an item on a masked array which has a shared mask will not copy") + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in num_dts] + + +class TestMaskedArray: + # Base test class for MaskedArrays. + + def setup_method(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. 
+ a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. 
+ data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + x = array([1, 2, 3], mask=True) + assert_equal(x._mask, [True, True, True]) + x = array([1, 2, 3], mask=False) + assert_equal(x._mask, [False, False, False]) + y = array([1, 2, 3], mask=x._mask, copy=False) + assert_(np.may_share_memory(x.mask, y.mask)) + y = array([1, 2, 3], mask=x._mask, copy=True) + assert_(not np.may_share_memory(x.mask, y.mask)) + x = array([1, 2, 3], mask=None) + assert_equal(x._mask, [False, False, False]) + + def test_masked_singleton_array_creation_warns(self): + # The first works, but should not (ideally), there may be no way + # to solve this, however, as long as `np.ma.masked` is an ndarray. + np.array(np.ma.masked) + with pytest.warns(UserWarning): + # Tries to create a float array, using `float(np.ma.masked)`. + # We may want to define this is invalid behaviour in the future! + # (requiring np.ma.masked to be a known NumPy scalar probably + # with a DType.) + np.array([3., np.ma.masked]) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creating a masked array from a list of masked arrays. 
+ x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_(data.mask is nomask) + + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") + assert_array_equal(res.mask, [[True, False], [False, False]]) + + # The above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool(): + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + + def test_creation_from_ndarray_with_padding(self): + x = np.array([('A', 0)], dtype={'names':['f0','f1'], + 'formats':['S4','i8'], + 'offsets':[0,8]}) + array(x) # used to fail due to 'V' padding field in x.dtype.descr + + def test_unknown_keyword_parameter(self): + with pytest.raises(TypeError, match="unexpected keyword argument"): + MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. + + def test_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m) + assert_(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m, order='C') + assert_(new_m.flags.c_contiguous) + + def test_fix_invalid(self): + # Checks fix_invalid. + with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + assert_(str(masked) == '--') + assert_(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + assert_(a[0] is x) + + import datetime + dt = datetime.datetime.now() + a[0] = dt + assert_(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
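+        # (endwith=False sorts masked entries to the front instead of the end.)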
+ assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_setitem_no_warning(self): + # Setitem shouldn't warn, because the assignment might be masked + # and warning for a masked assignment is weird (see gh-23000) + # (When the value is masked, otherwise a warning would be acceptable + # but is not given currently.) + x = np.ma.arange(60).reshape((6, 10)) + index = (slice(1, 5, 2), [7, 5]) + value = np.ma.masked_all((2, 2)) + value._data[...] = np.inf # not a valid integer... + x[index] = value + # The masked scalar is special cased, but test anyway (it's NaN): + x[...] = np.ma.masked + # Finally, a large value that cannot be cast to the float32 `x` + x = np.ma.arange(3., dtype=np.float32) + value = np.ma.array([2e234, 1, 1], mask=[True, False, False]) + x[...] = value + x[[0, 1, 2]] = value + + @suppress_copy_mask_on_assignment + def test_copy(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + assert_(allequal(x1, y1.data)) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + # Default for masked array is not to copy; see gh-10318. 
+ assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1,2,3]))) + assert_(isMaskedArray(np.ma.copy((1,2,3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_format(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(format(a), "[0 -- 2]") + assert_equal(format(masked), "--") + assert_equal(format(masked, ""), "--") + + # Postponed from PR #15410, perhaps address in the future. 
+ # assert_equal(format(masked, " >5"), " --") + # assert_equal(format(masked, " <5"), "-- ") + + # Expect a FutureWarning for using format_spec with MaskedElement + with assert_warns(FutureWarning): + with_format_string = format(masked, " >5") + assert_equal(with_format_string, "--") + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + + # arrays with a continuation + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19], + mask=False, + fill_value=999999)''') + ) + + # 2d arrays cause wrapping + a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) + a[1,1] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent(f'''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value={np.array(999999)[()]!r}, + dtype=int8)''') + ) + + # but not it they're a row vector + assert_equal( + repr(a[:1]), + textwrap.dedent(f'''\ + masked_array(data=[[1, 2, 3]], + mask=[[False, False, False]], + fill_value={np.array(999999)[()]!r}, + dtype=int8)''') + ) + + # dtype=int is implied, so not shown + assert_equal( + repr(a.astype(int)), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999)''') + ) + + def test_str_repr_legacy(self): + oldopts = np.get_printoptions() + np.set_printoptions(legacy='1.13') + try: + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' + ' mask = [False True True ..., False False False],\n' + ' fill_value = 999999)\n' + ) + finally: + np.set_printoptions(**oldopts) + + def test_0d_unicode(self): + u = 'caf\xe9' + utype = type(u) + + arr_nomask = np.ma.array(u) + arr_masked = np.ma.array(u, mask=True) + + assert_equal(utype(arr_nomask), u) + assert_equal(utype(arr_masked), '--') + + def test_pickling(self): + # Tests pickling + for dtype in (int, float, str, object): + a = arange(10).astype(dtype) + a.fill_value = 999 + + masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked + True, # Fully masked + False) # Fully unmasked + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for mask in masks: + a.mask = mask + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled._data, a._data) + if dtype in (object, int): + assert_equal(a_pickled.fill_value, 999) + else: + assert_equal(a_pickled.fill_value, dtype(999)) + assert_array_equal(a_pickled.mask, mask) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + x = np.array([(1.0, 2), (3.0, 4)], + dtype=[('x', float), ('y', int)]).view(np.recarray) + a = masked_array(x, mask=[(True, False), (False, 
True)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.recarray)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10) + a.shape = (-1, 2) + b = a.T + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test = pickle.loads(pickle.dumps(b, protocol=proto)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + + with suppress_warnings() as sup: + sup.filter(UserWarning, 'Warning: converting a masked element') + assert_(np.isnan(float(array([1], mask=[1])))) + + a = array([1, 2, 3], mask=[1, 0, 0]) + assert_raises(TypeError, lambda: float(a)) + assert_equal(float(a[-1]), 3.) + assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + assert_raises(MAError, lambda:int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. 
+ x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + @suppress_copy_mask_on_assignment + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' 
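+        # The copy gets its own _optinfo dict, so mutating it must not affect x.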
+ assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1,2,2,4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask = (False, [[True, False, True], + [False, False, True]], + False), + dtype = "int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, 
mvoid))
+
+    def test_mvoid_getitem(self):
+        # Test mvoid.__getitem__
+        ndtype = [('a', int), ('b', int)]
+        a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+                         dtype=ndtype)
+        # w/o mask
+        f = a[0]
+        assert_(isinstance(f, mvoid))
+        assert_equal((f[0], f['a']), (1, 1))
+        assert_equal(f['b'], 2)
+        # w/ mask
+        f = a[1]
+        assert_(isinstance(f, mvoid))
+        assert_(f[0] is masked)
+        assert_(f['a'] is masked)
+        assert_equal(f[1], 4)
+
+        # exotic dtype
+        A = masked_array(data=[([0,1],)],
+                         mask=[([True, False],)],
+                         dtype=[("A", ">i2", (2,))])
+        assert_equal(A[0]["A"], A["A"][0])
+        assert_equal(A[0]["A"], masked_array(data=[0, 1],
+                                             mask=[True, False], dtype=">i2"))
+
+    def test_mvoid_iter(self):
+        # Test iteration on __getitem__
+        ndtype = [('a', int), ('b', int)]
+        a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+                         dtype=ndtype)
+        # w/o mask
+        assert_equal(list(a[0]), [1, 2])
+        # w/ mask
+        assert_equal(list(a[1]), [masked, 4])
+
+    def test_mvoid_print(self):
+        # Test printing a mvoid
+        mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
+        assert_equal(str(mx[0]), "(1, 1)")
+        mx['b'][0] = masked
+        ini_display = masked_print_option._display
+        masked_print_option.set_display("-X-")
+        try:
+            assert_equal(str(mx[0]), "(1, -X-)")
+            assert_equal(repr(mx[0]), "(1, -X-)")
+        finally:
+            masked_print_option.set_display(ini_display)
+
+        # also check if there are object datatypes (see gh-7493)
+        mx = array([(1,), (2,)], dtype=[('a', 'O')])
+        assert_equal(str(mx[0]), "(1,)")
+
+    def test_mvoid_multidim_print(self):
+
+        # regression test for gh-6019
+        t_ma = masked_array(data = [([1, 2, 3],)],
+                            mask = [([False, True, False],)],
+                            fill_value = ([999999, 999999, 999999],),
+                            dtype = [('a', '<i8', (3,))])
+            if len(s) > 1:
+                assert_equal(np.concatenate((x, y), 1),
+                             concatenate((xm, ym), 1))
+                assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
+                assert_equal(np.sum(x, 1), sum(x, 1))
+                assert_equal(np.prod(x, 1), product(x, 1))
+
+    def test_binops_d2D(self):
+        # Test binary operations on 2D data
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+        test = a * b
+        control = array([[2., 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b * a
+        control = array([[2., 3.], [4., 5.], [6., 7.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        a = array([[1.], [2.], [3.]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]],
+                  mask=[[0, 0], [0, 0], [0, 1]])
+        test = a * b
+        control = array([[2, 3], [8, 10], [18, 3]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b * a
+        control = array([[2, 3], [8, 10], [18, 7]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+    def test_domained_binops_d2D(self):
+        # Test domained binary operations on 2D data
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+        test = a / b
+        control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+        test = b / a
+        control = array([[2. / 1., 3.
/ 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_ufunc_nomask(self): + # check the case ufuncs should set the mask to false + m = np.ma.array([1]) + # check we don't get array([False], dtype=bool) + assert_equal(np.true_divide(m, 5).mask.shape, ()) + + def test_noshink_on_creation(self): + # Check that the mask is not shrunk on array creation when not wanted + a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): + # Tests mod + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + + def test_TakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) + assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) + assert_equal(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y)) + assert_equal(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y)) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_imag_real(self): + # Check complex + xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) + assert_equal(xx.imag, [10, 2]) + assert_equal(xx.imag.filled(), [1e+20, 2]) + assert_equal(xx.imag.dtype, xx._data.imag.dtype) + assert_equal(xx.real, [1, 20]) + assert_equal(xx.real.filled(), [1e+20, 20]) + assert_equal(xx.real.dtype, xx._data.real.dtype) + + def test_methods_with_output(self): + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) + + for funcname in funclist: + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + # A ndarray as explicit input + output = np.empty(4, dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... 
the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_with_non_masked(self): + a = array([(1, 1), (2, 2), (3, 4)], + mask=[(0, 1), (0, 0), (1, 1)], dtype='i4,i4') + eq = a == a.data + ne = a.data != a + # Test the obvious. + assert_(np.all(eq)) + assert_(not np.any(ne)) + # Expect the mask set only for items with all fields masked. 
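+        # (np.ones((), a.mask.dtype) is an all-True scalar record, so the
+        # comparison flags exactly the rows whose fields are all masked.)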
+ expected_mask = a.mask == np.ones((), a.mask.dtype) + assert_array_equal(eq.mask, expected_mask) + assert_array_equal(ne.mask, expected_mask) + # The masked element will indicated not equal, because the + # masks did not match. + assert_equal(eq.data, [True, True, False]) + assert_array_equal(eq.data, ~ne.data) + + def test_eq_ne_structured_extra(self): + # ensure simple examples are symmetric and make sense. + # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 + dt = np.dtype('i4,i4') + for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt), + mvoid((1, 2), mask=(0, 1), dtype=dt), + mvoid((1, 2), mask=(1, 0), dtype=dt), + mvoid((1, 2), mask=(1, 1), dtype=dt)): + ma1 = m1.view(MaskedArray) + r1 = ma1.view('2i4') + for m2 in (np.array((1, 1), dtype=dt), + mvoid((1, 1), dtype=dt), + mvoid((1, 0), mask=(0, 1), dtype=dt), + mvoid((3, 2), mask=(0, 1), dtype=dt)): + ma2 = m2.view(MaskedArray) + r2 = ma2.view('2i4') + eq_expected = (r1 == r2).all() + assert_equal(m1 == m2, eq_expected) + assert_equal(m2 == m1, eq_expected) + assert_equal(ma1 == m2, eq_expected) + assert_equal(m1 == ma2, eq_expected) + assert_equal(ma1 == ma2, eq_expected) + # Also check it is the same if we do it element by element. + el_by_el = [m1[name] == m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).all(), eq_expected) + ne_expected = (r1 != r2).any() + assert_equal(m1 != m2, ne_expected) + assert_equal(m2 != m1, ne_expected) + assert_equal(ma1 != m2, ne_expected) + assert_equal(m1 != ma2, ne_expected) + assert_equal(ma1 != ma2, ne_expected) + el_by_el = [m1[name] != m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_eq_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_ne_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == 
True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize("op", [operator.eq, operator.lt]) + def test_eq_broadcast_with_unmasked(self, op): + a = array([0, 1], mask=[0, 1]) + b = np.arange(10).reshape(5, 2) + result = op(a, b) + assert_(result.mask.shape == b.shape) + assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + + @pytest.mark.parametrize("op", [operator.eq, operator.gt]) + def test_comp_no_mask_not_broadcast(self, op): + # Regression test for failing doctest in MaskedArray.nonzero + # after gh-24556. + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = op(a, 3) + assert_(not result.mask.shape) + assert_(result.mask is nomask) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_comparisons_for_numeric(self, op, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = op(a, a) + assert_equal(test.data, op(a._data, a._data)) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = op(a, a[0]) + assert_equal(test.data, op(a._data, a._data[0])) + 
assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = op(a, b) + assert_equal(test.data, op(a._data, b._data)) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = op(a[0], b) + assert_equal(test.data, op(a._data[0], b._data)) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = op(b, a[0]) + assert_equal(test.data, op(b._data, a._data[0])) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + @pytest.mark.parametrize('fill', [None, "N/A"]) + def test_comparisons_strings(self, op, fill): + # See gh-21770, mask propagation is broken for strings (and some other + # cases) so we explicitly test strings here. + # In principle only == and != may need special handling... + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Comparison to `None`") + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) + assert_equal(a.data == None, [True, False]) + assert_equal(a != None, array([False, True], mask=[0, 1])) + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) + assert_equal(a != None, [False, True]) + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) + assert_equal(a != None, array([True, False], mask=True)) + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays. 
+ for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetic(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes: + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + 
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
+ assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues: + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. 
+ #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment':"updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) 
+        assert_equal(x._fill_value, np.array(999.))
+
+    def test_subarray_fillvalue(self):
+        # gh-10483 test multi-field index fill value
+        fields = array([(1, 1, 1)],
+                       dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        with suppress_warnings() as sup:
+            sup.filter(FutureWarning, "Numpy has detected")
+            subfields = fields[['i', 'f']]
+            assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+            # test comparison does not raise:
+            subfields[1:] == subfields[:-1]
+
+    def test_fillvalue_exotic_dtype(self):
+        # Tests yet more exotic flexible dtypes
+        _check_fill_value = np.ma.core._check_fill_value
+        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
+        control = np.array((default_fill_value(0),
+                            default_fill_value('0'),
+                            default_fill_value(0.),),
+                           dtype=ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        # The shape shouldn't matter
+        ndtype = [('f0', float, (2, 2))]
+        control = np.array((default_fill_value(0.),),
+                           dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+
+        ndtype = np.dtype("int, (2,3)float, float")
+        control = np.array((default_fill_value(0),
+                            default_fill_value(0.),
+                            default_fill_value(0.),),
+                           dtype="int, float, float").astype(ndtype)
+        test = _check_fill_value(None, ndtype)
+        assert_equal(test, control)
+        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+        # but when indexing, fill value should become scalar not tuple
+        # See issue #6723
+        M = masked_array(control)
+        assert_equal(M["f1"].fill_value.ndim, 0)
+
+    def test_fillvalue_datetime_timedelta(self):
+        # Test default fillvalue for datetime64 and timedelta64 types.
+        # See issue #4476, this would return '?' which would cause errors
+        # elsewhere
+
+        for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
+                         "h", "D", "W", "M", "Y"):
+            control = numpy.datetime64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
+            np.testing.assert_equal(test, control)
+
+            control = numpy.timedelta64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
+            np.testing.assert_equal(test, control)
+
+
+class TestUfuncs:
+
+    def test_no_masked_nan_warnings(self):
+        # check that a nan in masked position does not
+        # cause ufunc warnings
+
+        m = np.ma.array([0.5, np.nan], mask=[0, 1])
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("error")
+
+            # test unary and binary ufuncs
+            exp(m)
+            add(m, 1)
+            m > 0
+
+            # test different unary domains
+            sqrt(m)
+            log(m)
+            tan(m)
+            arcsin(m)
+            arccos(m)
+            arccosh(m)
+
+            # test binary domains
+            divide(m, 2)
+
+            # also check that allclose uses ma ufuncs, to avoid warning
+            allclose(m, 0.5)
+
+
+class TestMaskedArrayInPlaceArithmetic:
+    # Test MaskedArray Arithmetic
+
+    def setup_method(self):
+        x = arange(10)
+        y = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        self.intdata = (x, y, xm)
+        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+        self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        self.othertypes = [np.dtype(_).type for _ in self.othertypes]
+        self.uint8data = (
+            x.astype(np.uint8),
+            y.astype(np.uint8),
+            xm.astype(np.uint8)
+        )
+
+    def test_inplace_addition_scalar(self):
+        # Test of inplace additions
+        (x, y, xm) = self.intdata
+        xm[2] = masked
+        x += 1
+        assert_equal(x, y + 1)
+        xm += 1
+        assert_equal(xm, y + 1)
+
+        (x, _, xm) = self.floatdata
+        id1 = x.data.ctypes.data
+        x += 1.
+        assert_(id1 == x.data.ctypes.data)
+        assert_equal(x, y + 1.)
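The in-place tests in this class all assert the same propagation rule, so a minimal self-contained sketch of it may help when reading them. This snippet is an illustration using only the public numpy.ma API, not part of the vendored test file: an in-place operation leaves the data under masked positions untouched and ORs the operand masks together.

    import numpy as np

    xm = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])      # element 2 masked
    a = np.ma.array([10, 10, 10, 10], mask=[0, 1, 0, 0])   # element 1 masked

    xm += a
    # The result mask is mask_or of the two operand masks ...
    assert list(xm.mask) == [False, True, True, False]
    # ... and positions unmasked in both operands hold the plain sum.
    assert xm[0] == 10 and xm[3] == 13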
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. 
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + def test_inplace_addition_array_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + def test_inplace_multiplication_array_type(self): + # Test of inplace 
multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + try: + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + try: + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. 
+ try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + +class TestMaskedArrayMethods: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_clip_out(self): + # gh-14140 + a = np.arange(10) + m = np.ma.MaskedArray(a, mask=[0, 1] * 5) + m.clip(0, 5, out=m) + assert_equal(m.mask, [0, 1] * 5) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) 
+ b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_zeros(self): + # Tests zeros/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = zeros(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = zeros_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check zeros_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = zeros_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_ones(self): + # Tests ones/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = ones(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = ones_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check ones_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = ones_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + @suppress_copy_mask_on_assignment + def test_put(self): + # Tests put. 
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + @pytest.mark.parametrize("order", "AKCF") + @pytest.mark.parametrize("data_order", "CF") + def test_ravel_order(self, order, data_order): + # Ravelling must ravel mask and data in the same order always to avoid + # misaligning the two in the ravel result. 
+        arr = np.ones((5, 10), order=data_order)
+        arr[0, :] = 0
+        mask = np.ones((10, 5), dtype=bool, order=data_order).T
+        mask[0, :] = False
+        x = array(arr, mask=mask)
+        assert x._data.flags.fnc != x._mask.flags.fnc
+        assert (x.filled(0) == 0).all()
+        raveled = x.ravel(order)
+        assert (raveled.filled(0) == 0).all()
+
+        # NOTE: Can be wrong if arr order is neither C nor F and `order="K"`
+        assert_array_equal(arr.ravel(order), x.ravel(order)._data)
+
+    def test_reshape(self):
+        # Tests reshape
+        x = arange(4)
+        x[0] = masked
+        y = x.reshape(2, 2)
+        assert_equal(y.shape, (2, 2,))
+        assert_equal(y._mask.shape, (2, 2,))
+        assert_equal(x.shape, (4,))
+        assert_equal(x._mask.shape, (4,))
+
+    def test_sort(self):
+        # Test sort
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        sortedx = sort(x)
+        assert_equal(sortedx._data, [1, 2, 3, 4])
+        assert_equal(sortedx._mask, [0, 0, 0, 1])
+
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [4, 1, 2, 3])
+        assert_equal(sortedx._mask, [1, 0, 0, 0])
+
+        x.sort()
+        assert_equal(x._data, [1, 2, 3, 4])
+        assert_equal(x._mask, [0, 0, 0, 1])
+
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+        x.sort(endwith=False)
+        assert_equal(x._data, [4, 1, 2, 3])
+        assert_equal(x._mask, [1, 0, 0, 0])
+
+        x = [1, 4, 2, 3]
+        sortedx = sort(x)
+        assert_(not isinstance(sortedx, MaskedArray))
+
+        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
+        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
+        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+
+        x = array([0, -1], dtype=np.int8)
+        sortedx = sort(x, kind="stable")
+        assert_equal(sortedx, array([-1, 0], dtype=np.int8))
+
+    def test_stable_sort(self):
+        x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+        expected = array([0, 3, 1, 4, 2, 5])
+        computed = argsort(x, kind='stable')
+        assert_equal(computed, expected)
+
+    def test_argsort_matches_sort(self):
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        for kwargs in [dict(),
+                       dict(endwith=True),
+                       dict(endwith=False),
+                       dict(fill_value=2),
+                       dict(fill_value=2, endwith=True),
+                       dict(fill_value=2, endwith=False)]:
+            sortedx = sort(x, **kwargs)
+            argsortedx = x[argsort(x, **kwargs)]
+            assert_equal(sortedx._data, argsortedx._data)
+            assert_equal(sortedx._mask, argsortedx._mask)
+
+    def test_sort_2d(self):
+        # Check sort of 2D array.
+ # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on structured dtype. + a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianness and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0,0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0,0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None,:] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0,1] = np.ma.masked + xt = x.T + + xt[1,0] = 10 + xt[0,1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3,3)) + x[0,0] = 10 + x[1,1] = np.ma.masked + x[2,2] = 20 + xd = x.diagonal() + x[1,1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods: + + def setup_method(self): + # Base data definition. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_anom_shape(self): + a = masked_array([1, 2, 3]) + assert_equal(a.anom().shape, a.shape) + a.mask = True + assert_equal(a.anom().shape, a.shape) + assert_(np.ma.is_masked(a.anom())) + + def test_anom(self): + a = masked_array(np.arange(1, 7).reshape(2, 3)) + assert_almost_equal(a.anom(), + [[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) + assert_almost_equal(a.anom(axis=0), + [[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) + assert_almost_equal(a.anom(axis=1), + [[-1., 0., 1.], [-1., 0., 1.]]) + a.mask = [[0, 0, 1], [0, 1, 0]] + mval = -99 + assert_almost_equal(a.anom().filled(mval), + [[-2.25, -1.25, mval], [0.75, mval, 2.75]]) + assert_almost_equal(a.anom(axis=0).filled(mval), + [[-1.5, 0.0, mval], [1.5, mval, 0.0]]) + assert_almost_equal(a.anom(axis=1).filled(mval), + [[-0.5, 0.5, mval], [-1.0, mval, 1.0]]) + + def test_trace(self): + # Tests trace on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2*4*4).reshape(2,4,4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1,3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + z = masked_array([[0,1],[3,3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1,2,3,4], dtype='f8') + bar = array([1,2,3,4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert((foo.mean() == bar.mean()) is np.bool(True)) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4,4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @suppress_copy_mask_on_assignment + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) + 
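+ # Editor's note: the first two columns of `a` hold two unmasked
+ # values each and the third holds none, so the third column is always
+ # masked; with ddof=2 the divisor n - ddof reaches zero and the
+ # remaining columns come out masked as well.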
test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + @requires_memory(free_bytes=2 * 10000 * 1000 * 2) + def test_mean_overflow(self): + # Test overflow in masked arrays + # gh-20272 + a = masked_array(np.full((10000, 10000), 65535, dtype=np.uint16), + mask=np.zeros((10000, 10000))) + assert_equal(a.mean(), 65535.0) + + def test_diff_with_prepend(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[3:], value=2) + a_prep = np.ma.masked_equal(x[:3], value=2) + diff1 = np.ma.diff(a, prepend=a_prep, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_append(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[:3], value=2) + a_app = np.ma.masked_equal(x[3:], value=2) + diff1 = np.ma.diff(a, append=a_app, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_dim_0(self): + with pytest.raises( + ValueError, + match="diff requires input that is at least one dimensional" + ): + np.ma.diff(np.array(1)) + + def test_diff_with_n_0(self): + a = np.ma.masked_equal([1, 2, 2, 3, 4, 2, 1, 1], value=2) + diff = np.ma.diff(a, n=0, axis=0) + + assert_(np.ma.allequal(a, diff)) + + +class TestMaskedArrayMathMethodsComplex: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. 
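+ # Editor's note: same 6x6 fixture as the float test class above,
+ # except that several entries are purely imaginary, so var/std are
+ # exercised on complex data (where the variance is real-valued).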
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions: + # Test class for miscellaneous functions. + + def setup_method(self): + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. 
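+ # Editor's addition (illustrative sketch): masked_where masks exactly
+ # the positions where the condition holds.
+ demo = masked_where([0, 1, 0], [1, 2, 3])
+ assert_equal(demo.mask, [0, 1, 0])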
+ atest = ones((10, 10, 10), dtype=float)
+ btest = zeros(atest.shape, MaskType)
+ ctest = masked_where(btest, atest)
+ assert_equal(atest, ctest)
+
+ def test_masked_where_shape_constraint(self):
+ a = arange(10)
+ with assert_raises(IndexError):
+ masked_equal(1, a)
+ test = masked_equal(a, 1)
+ assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ def test_masked_where_structured(self):
+ # test that masked_where on a structured array sets a structured
+ # mask (see issue #2972)
+ a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+ am = np.ma.masked_where(a["A"] < 5, a)
+ assert_equal(am.mask.dtype.names, am.dtype.names)
+ assert_equal(am["A"],
+ np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+ def test_masked_where_mismatch(self):
+ # gh-4520
+ x = np.arange(10)
+ y = np.arange(5)
+ assert_raises(IndexError, masked_where, y > 6, x)
+
+ def test_masked_otherfunctions(self):
+ assert_equal(masked_inside(list(range(5)), 1, 3),
+ [0, 199, 199, 199, 4])
+ assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+ assert_equal(masked_inside(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 1, 1, 0])
+ assert_equal(masked_outside(array(list(range(5)),
+ mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 0, 0, 1])
+ assert_equal(masked_equal(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 0])
+ assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 1])
+
+ def test_round(self):
+ a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+ mask=[0, 1, 0, 0, 0])
+ assert_equal(a.round(), [1., 2., 3., 5., 6.])
+ assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+ assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+ b = empty_like(a)
+ a.round(out=b)
+ assert_equal(b, [1., 2., 3., 5., 6.])
+
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+
+ def test_round_with_output(self):
+ # Testing round with an explicit output
+
+ xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+ xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+ # A ndarray as explicit input
+ output = np.empty((3, 4), dtype=float)
+ output.fill(-9999)
+ result = np.round(xm, decimals=2, out=output)
+ # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] = 
True + assert_equal(d._mask, tmp) + + with np.errstate(invalid="warn"): + # The fill value is 1e20, it cannot be converted to `int`: + with pytest.warns(RuntimeWarning, match="invalid value"): + ixm = xm.astype(int) + d = where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_object(self): + a = np.array(None) + b = masked_array(None) + r = b.copy() + assert_equal(np.ma.where(True, a, a), r) + assert_equal(np.ma.where(True, b, b), r) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.result_type(np.int32, np.float32) + assert_equal(test, control) + + def test_where_broadcast(self): + # Issue 8599 + x = np.arange(9).reshape(3, 3) + y = np.zeros(3) + core = np.where([1, 0, 1], x, y) + ma = where([1, 0, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured(self): + # Issue 8600 + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + y = np.array((10, 20), dtype=dt) + core = np.where([0, 1, 1], x, y) + ma = np.where([0, 1, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured_masked(self): + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + + ma = where([0, 1, 1], x, masked) + expected = masked_where([1, 0, 0], x) + + assert_equal(ma.dtype, expected.dtype) + assert_equal(ma, expected) + assert_equal(ma.mask, expected.mask) + + def test_masked_invalid_error(self): + a = np.arange(5, dtype=object) + a[3] = np.inf + a[2] = np.nan + with pytest.raises(TypeError, + match="not supported for the input types"): + np.ma.masked_invalid(a) + + def test_masked_invalid_pandas(self): + # getdata() used to be bad for pandas series due to its _data + # attribute. This test is a regression test mainly and may be + # removed if getdata() is adjusted. 
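+ # Editor's addition (illustrative sketch): on a plain float array,
+ # masked_invalid masks every non-finite entry.
+ demo = np.ma.masked_invalid([5.0, np.nan, np.inf])
+ assert_equal(demo.mask, [False, True, True])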
+ class Series():
+ _data = "nonsense"
+
+ def __array__(self, dtype=None, copy=None):
+ return np.array([5, np.nan, np.inf])
+
+ arr = np.ma.masked_invalid(Series())
+ assert_array_equal(arr._data, np.array(Series()))
+ assert_array_equal(arr._mask, [False, True, True])
+
+ @pytest.mark.parametrize("copy", [True, False])
+ def test_masked_invalid_full_mask(self, copy):
+ # Matplotlib relied on masked_invalid always returning a full mask
+ # (Also astropy projects, but were ok with it gh-22720 and gh-22842)
+ a = np.ma.array([1, 2, 3, 4])
+ assert a._mask is nomask
+ res = np.ma.masked_invalid(a, copy=copy)
+ assert res.mask is not nomask
+ # mask of a should not be mutated
+ assert a.mask is nomask
+ assert np.may_share_memory(a._data, res._data) != copy
+
+ def test_choose(self):
+ # Test choose
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ chosen = choose([2, 3, 1, 0], choices)
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ chosen = choose([2, 4, 1, 0], choices, mode='clip')
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ chosen = choose([2, 4, 1, 0], choices, mode='wrap')
+ assert_equal(chosen, array([20, 1, 12, 3]))
+ # Check with some masked indices
+ indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
+ chosen = choose(indices_, choices, mode='wrap')
+ assert_equal(chosen, array([99, 1, 12, 99]))
+ assert_equal(chosen.mask, [1, 0, 0, 1])
+ # Check with some masked choices
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
+ [1, 0, 0, 0], [0, 0, 0, 0]])
+ indices_ = [2, 3, 1, 0]
+ chosen = choose(indices_, choices, mode='wrap')
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ assert_equal(chosen.mask, [1, 0, 0, 1])
+
+ def test_choose_with_out(self):
+ # Test choose with an explicit out keyword
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ store = empty(4, dtype=int)
+ chosen = choose([2, 3, 1, 0], choices, out=store)
+ assert_equal(store, array([20, 31, 12, 3]))
+ assert_(store is chosen)
+ # Check with some masked indices + out
+ store = empty(4, dtype=int)
+ indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
+ chosen = choose(indices_, choices, mode='wrap', out=store)
+ assert_equal(store, array([99, 31, 12, 99]))
+ assert_equal(store.mask, [1, 0, 0, 1])
+ # Check with some masked choices + out in a ndarray !
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this 
also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. + arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. 
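+ # Editor's addition (illustrative sketch): compressed() drops the
+ # masked entries and hands back an ordinary 1-D ndarray.
+ demo = array([1, 2, 3], mask=[0, 1, 0])
+ assert_equal(np.ma.compressed(demo), [1, 3])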
+ # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1],[2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + + result = masked_equal([0, 1, -1, -1, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full') + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same') + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid') + assert_equal(test, result[1:-1]) + + result = masked_equal([0, 1, 1, 3, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full', propagate_mask=False) + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same', propagate_mask=False) + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid', propagate_mask=False) + assert_equal(test, result[1:-1]) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields: + + def setup_method(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = 
(base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + base[0] = masked + + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b', '|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to dty + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + + def _test_index(i): + assert_equal(type(a[i]), mvoid) + assert_equal_records(a[i]._data, a._data[i]) + assert_equal_records(a[i]._mask, a._mask[i]) + + assert_equal(type(a[i, ...]), MaskedArray) + assert_equal_records(a[i,...]._data, a._data[i,...]) + assert_equal_records(a[i,...]._mask, a._mask[i,...]) + + _test_index(1) # No mask + _test_index(0) # One element masked + _test_index(-2) # All element masked + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def test_setitem_scalar(self): + # 8510 + mask_0d = np.ma.masked_array(1, mask=True) + arr = np.ma.arange(3) + arr[0] = mask_0d + assert_array_equal(arr.mask, [True, False, False]) + + def test_element_len(self): + # check that len() works for mvoid (Github issue #576) + for rec in self.data['base']: + assert_equal(len(rec), len(self.data['ddtype'])) + + +class 
TestMaskedObjectArray: + + def test_getitem(self): + arr = np.ma.array([None, None]) + for dt in [float, object]: + a0 = np.eye(2).astype(dt) + a1 = np.eye(3).astype(dt) + arr[0] = a0 + arr[1] = a1 + + assert_(arr[0] is a0) + assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_(arr[0,...][()] is a0) + assert_(arr[1,...][()] is a1) + + arr[0] = np.ma.masked + + assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_equal(arr[0,...].mask, True) + assert_(arr[1,...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0,...][()].data, a0) + assert_equal(arr[0,...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0,...] = np.array([np.ma.masked], object)[0,...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView: + + def setup_method(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, 
controlmask) = self.data + + test = a.view((float, 2), np.recarray) + assert_equal(test, data) + assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs: + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + # mask out last element of last dimension + m[:,:,-1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) + assert_equal(ma_f(a, axis=(0,1))[...,:-1], + numpy_f(d[...,:-1], axis=(0,1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2,3,4))%2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + m[:,0,:] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 4*ones((4,))) + assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) + assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) + assert_equal(count(a, axis=-2), 2*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 6*ones((4,))) + assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) + assert_equal(count(a, axis=-2), 3*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant: + def _do_add_test(self, add): + # sanity check + assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def 
test_operator(self): + self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], '--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases: + + # TODO: Test masked_object, masked_equal, ... 
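+ # Editor's note: masked_values compares floats approximately
+ # (isclose-style, via atol/rtol), unlike the exact test used by
+ # masked_equal; shrink=True additionally collapses an all-False
+ # mask down to nomask, as the cases below verify.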
+ + def test_masked_values(self): + res = masked_values(np.array([-32768.0]), np.int16(-32768)) + assert_equal(res.mask, [True]) + + res = masked_values(np.inf, np.inf) + assert_equal(res.mask, True) + + res = np.ma.masked_values(np.inf, -np.inf) + assert_equal(res.mask, False) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True) + assert_(res.mask is np.ma.nomask) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False) + assert_equal(res.mask, [False] * 4) + + +def test_masked_array(): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + +def test_masked_array_no_copy(): + # check nomask array is updated in place + a = np.ma.array([1, 2, 3, 4]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [False, False, True, False]) + # check masked array is updated in place + a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [True, False, True, False]) + # check masked array with masked_invalid is updated in place + a = np.ma.array([np.inf, 1, 2, 3, 4]) + _ = np.ma.masked_invalid(a, copy=False) + assert_array_equal(a.mask, [True, False, False, False, False]) + +def test_append_masked_array(): + a = np.ma.masked_equal([1,2,3], value=2) + b = np.ma.masked_equal([4,3,2], value=2) + + result = np.ma.append(a, b) + expected_data = [1, 2, 3, 4, 3, 2] + expected_mask = [False, True, False, False, False, True] + assert_array_equal(result.data, expected_data) + assert_array_equal(result.mask, expected_mask) + + a = np.ma.masked_all((2,2)) + b = np.ma.ones((3,1)) + + result = np.ma.append(a, b) + expected_data = [1] * 3 + expected_mask = [True] * 4 + [False] * 3 + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + result = np.ma.append(a, b, axis=None) + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + +def test_append_masked_array_along_axis(): + a = np.ma.masked_equal([1,2,3], value=2) + b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + + # When `axis` is specified, `values` must have the correct shape. + assert_raises(ValueError, np.ma.append, a, b, axis=0) + + result = np.ma.append(a[np.newaxis,:], b, axis=0) + expected = np.ma.arange(1, 10) + expected[[1, 6]] = np.ma.masked + expected = expected.reshape((3,3)) + assert_array_equal(result.data, expected.data) + assert_array_equal(result.mask, expected.mask) + +def test_default_fill_value_complex(): + # regression test for Python 3, where 'unicode' was not defined + assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) + + +def test_ufunc_with_output(): + # check that giving an output argument always returns that output. + # Regression test for gh-8416. 
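+ # Editor's note: the same identity guarantee is expected of any
+ # ufunc called with an out= argument, e.g. np.multiply(x, 2., out=x)
+ # should likewise return x itself (illustrative, not upstream).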
+ x = array([1., 2., 3.], mask=[0, 0, 1]) + y = np.add(x, 1., out=x) + assert_(y is x) + + +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype_mask_ordering(): + descr = np.dtype([('v', int, 3), ('x', [('y', float)])]) + x = array([ + [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], + [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) + x[0]['v'][0] = np.ma.masked + + x_a = x.astype(descr) + assert x_a.dtype.names == np.dtype(descr).names + assert x_a.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a) + + assert_(x is x.astype(x.dtype, copy=False)) + assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) + + x_f = x.astype(x.dtype, order='F') + assert_(x_f.flags.f_contiguous) + assert_(x_f.mask.flags.f_contiguous) + + # Also test the same indirectly, via np.array + x_a2 = np.array(x, dtype=descr, subok=True) + assert x_a2.dtype.names == np.dtype(descr).names + assert x_a2.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a2) + + assert_(x is np.array(x, dtype=descr, copy=None, subok=True)) + + x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) + assert_(x_f2.flags.f_contiguous) + assert_(x_f2.mask.flags.f_contiguous) + + +@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) +@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) +@pytest.mark.filterwarnings('ignore::numpy.exceptions.ComplexWarning') +def test_astype_basic(dt1, dt2): + # See gh-12070 + src = np.ma.array(ones(3, dt1), fill_value=1) + dst = src.astype(dt2) + + assert_(src.fill_value == 1) + assert_(src.dtype == dt1) + assert_(src.fill_value.dtype == dt1) + + assert_(dst.fill_value == 1) + assert_(dst.dtype == dt2) + assert_(dst.fill_value.dtype == dt2) + + assert_equal(src, dst) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + +def test_mask_shape_assignment_does_not_break_masked(): + a = np.ma.masked + b = np.ma.array(1, mask=a.mask) + b.shape = (1,) + assert_equal(a.mask.shape, ()) + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +def test_doc_note(): + def method(self): + """This docstring + + Has multiple lines + + And notes + + Notes + ----- + original note + """ + pass + + expected_doc = """This docstring + +Has multiple lines + +And notes + +Notes +----- +note + +original note""" + + assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc) + + +def test_gh_22556(): + source = np.ma.array([0, [0, 1, 
2]], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[1].append('this should not appear in source') + assert len(source[1]) == 3 + + +def test_gh_21022(): + # testing for absence of reported error + source = np.ma.masked_array(data=[-1, -1], mask=True, dtype=np.float64) + axis = np.array(0) + result = np.prod(source, axis=axis, keepdims=False) + result = np.ma.masked_array(result, + mask=np.ones(result.shape, dtype=np.bool)) + array = np.ma.masked_array(data=-1, mask=True, dtype=np.float64) + copy.deepcopy(array) + copy.deepcopy(result) + + +def test_deepcopy_2d_obj(): + source = np.ma.array([[0, "dog"], + [1, 1], + [[1, 2], "cat"]], + mask=[[0, 1], + [0, 0], + [0, 0]], + dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[2, 0].extend(['this should not appear in source', 3]) + assert len(source[2, 0]) == 2 + assert len(deepcopy[2, 0]) == 4 + assert_equal(deepcopy._mask, source._mask) + deepcopy._mask[0, 0] = 1 + assert source._mask[0, 0] == 0 + + +def test_deepcopy_0d_obj(): + source = np.ma.array(0, mask=[0], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[...] = 17 + assert_equal(source, 0) + assert_equal(deepcopy, 17) diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/test_deprecations.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..f7b44a25589888bbe546155dfd9a383f08965b51 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_deprecations.py @@ -0,0 +1,84 @@ +"""Test deprecation and future warnings. + +""" +import pytest +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import assert_equal +from numpy.ma.core import MaskedArrayFutureWarning +import io +import textwrap + +class TestArgsort: + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = assert_warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum: + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) + + +class TestFromtextfile: + def test_fromtextfile_delimitor(self): + # NumPy 1.22.0, 2021-09-23 + + textfile = 
io.StringIO(textwrap.dedent( + """ + A,B,C,D + 'string 1';1;1.0;'mixed column' + 'string 2';2;2.0; + 'string 3';3;3.0;123 + 'string 4';4;4.0;3.14 + """ + )) + + with pytest.warns(DeprecationWarning): + result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/test_extras.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_extras.py new file mode 100644 index 0000000000000000000000000000000000000000..4f1032537b77456317dbb71b8b9f47fb7587ad5b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_extras.py @@ -0,0 +1,1921 @@ +# pylint: disable-msg=W0611, W0612, W0511 +"""Tests suite for MaskedArray. +Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +import warnings +import itertools +import pytest + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.testing import ( + assert_warns, suppress_warnings + ) +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal + ) +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, + nomask, ones, zeros, count + ) +from numpy.ma.extras import ( + atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, + median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, + ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, + mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, + notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, + diagflat, ndenumerate, stack, vstack + ) + + +class TestGeneric: + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_with_object_nested(self): + # Test masked_all works with nested array with dtype of an 'object' + # refers to issue #15895 + my_dtype = np.dtype([('b', ([('c', object)], (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']['c']), 1) + assert_equal(masked_arr['b']['c'].shape, (1, 1)) + assert_equal(masked_arr['b']['c']._fill_value.shape, ()) + + def test_masked_all_with_object(self): + # same as above except that the array is not nested + my_dtype = 
np.dtype([('b', (object, (1,)))])
+        masked_arr = np.ma.masked_all((1,), my_dtype)
+
+        assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
+        assert_equal(len(masked_arr['b']), 1)
+        assert_equal(masked_arr['b'].shape, (1, 1))
+        assert_equal(masked_arr['b']._fill_value.shape, ())
+
+    def test_masked_all_like(self):
+        # Tests masked_all_like
+        # Standard dtype
+        base = array([1, 2], dtype=float)
+        test = masked_all_like(base)
+        control = array([1, 1], mask=[1, 1], dtype=float)
+        assert_equal(test, control)
+        # Flexible dtype
+        dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
+        base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
+        test = masked_all_like(base)
+        control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
+        assert_equal(test, control)
+        # Nested dtype
+        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+        control = array([(1, (1, 1)), (1, (1, 1))],
+                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+        test = masked_all_like(control)
+        assert_equal(test, control)
+
+    def check_clump(self, f):
+        # Exhaustively check clump_masked/clump_unmasked: for every mask
+        # pattern j over arrays of length i, the values gathered from the
+        # returned slices must sum to the (un)masked total.
+        for i in range(1, 7):
+            for j in range(2**i):
+                k = np.arange(i, dtype=int)
+                ja = np.full(i, j, dtype=int)
+                a = masked_array(2**k)
+                a.mask = (ja & (2**k)) != 0  # bit n of j masks element n
+                s = 0
+                for sl in f(a):
+                    s += a.data[sl].sum()
+                if f == clump_unmasked:
+                    assert_equal(a.compressed().sum(), s)
+                else:
+                    a.mask = ~a.mask
+                    assert_equal(a.compressed().sum(), s)
+
+    def test_clump_masked(self):
+        # Test clump_masked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        #
+        test = clump_masked(a)
+        control = [slice(0, 3), slice(6, 7), slice(8, 10)]
+        assert_equal(test, control)
+
+        self.check_clump(clump_masked)
+
+    def test_clump_unmasked(self):
+        # Test clump_unmasked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        test = clump_unmasked(a)
+        control = [slice(3, 6), slice(7, 8), ]
+        assert_equal(test, control)
+
+        self.check_clump(clump_unmasked)
+
+    def test_flatnotmasked_contiguous(self):
+        # Test flatnotmasked_contiguous
+        a = arange(10)
+        # No mask
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # mask of all false
+        a.mask = np.zeros(10, dtype=bool)
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # Some mask
+        a[(a < 3) | (a > 8) | (a == 5)] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(3, 5), slice(6, 9)])
+        #
+        a[:] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [])
+
+
+class TestAverage:
+    # Several tests of average. Why so many? Good point...
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        assert_equal(2.0, average(ott, axis=0))
+        assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+        assert_equal(2.0, result)
+        assert_(wts == 4.0)
+        ott[:] = masked
+        assert_equal(average(ott, axis=0).mask, [True])
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        assert_equal(average(ott, axis=0), [2.0, 0.0])
+        assert_equal(average(ott, axis=1).mask[0], [True])
+        assert_equal([2., 0.], average(ott, axis=0))
+        result, wts = average(ott, axis=0, returned=True)
+        assert_equal(wts, [1., 0.])
+
+    def test_testAverage2(self):
+        # More tests of average.
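+        # Worked example of the weighted cases below: with x = arange(6)
+        # and w1 = [0, 1, 1, 1, 1, 0], the end points drop out, so
+        # average(x, weights=w1) = (1 + 2 + 3 + 4) / 4 = 2.5.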
+ w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6, dtype=np.float64) + assert_equal(average(x, axis=0), 2.5) + assert_equal(average(x, axis=0, weights=w1), 2.5) + y = array([arange(6, dtype=np.float64), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + assert_equal(average(y, None, weights=w2), 20. / 6.) + assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) 
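+        # (axis=None averages only the unmasked entries of a2dm:
+        #  (1 + 2 + 4) / 3 = 7/3.)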
+ a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_testAverage4(self): + # Test that `keepdims` works with average + x = np.array([2, 3, 4]).reshape(3, 1) + b = np.ma.array(x, mask=[[False], [False], [True]]) + w = np.array([4, 5, 6]).reshape(3, 1) + actual = average(b, weights=w, axis=1, keepdims=True) + desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]]) + assert_equal(actual, desired) + + def test_weight_and_input_dims_different(self): + # this test mirrors a test for np.average() + # in lib/test/test_function_base.py + y = np.arange(12).reshape(2, 2, 3) + w = np.array([0., 0., 1., .5, .5, 0., 0., .5, .5, 1., 0., 0.])\ + .reshape(2, 2, 3) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + subw0 = w[:, :, 0] + + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array([7., 8., 9.], mask=[False, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + m[:, :, 0] = True + m[0, 0, 1] = True + yma = np.ma.array(y, mask=m) + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array( + [np.nan, 8., 9.], + mask=[True, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + + subw1 = w[1, :, :] + actual = average(yma, axis=(1, 2), weights=subw1) + desired = masked_array([2.25, 8.25], mask=[False, False]) + assert_almost_equal(actual, desired) + + # here the weights have the wrong shape for the specified axes + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1, 2), weights=subw0) + + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1), weights=subw1) + + # swapping the axes should be same as transposing weights + actual = average(yma, axis=(1, 0), weights=subw0) + desired = average(yma, axis=(0, 1), weights=subw0.T) + assert_almost_equal(actual, desired) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. + # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], + [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. 
+ wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0)*1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1)*1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + @pytest.mark.parametrize( + 'x, axis, expected_avg, weights, expected_wavg, expected_wsum', + [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]), + ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]], + [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])], + ) + def test_basic_keepdims(self, x, axis, expected_avg, + weights, expected_wavg, expected_wsum): + avg = np.ma.average(x, axis=axis, keepdims=True) + assert avg.shape == np.shape(expected_avg) + assert_array_equal(avg, expected_avg) + + wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + + wavg, wsum = np.ma.average(x, axis=axis, weights=weights, + returned=True, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + assert wsum.shape == np.shape(expected_wsum) + assert_array_equal(wsum, expected_wsum) + + def test_masked_weights(self): + # Test with masked weights. + # (Regression test for https://github.com/numpy/numpy/issues/10438) + a = np.ma.array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + weights_unmasked = masked_array([5, 28, 31], mask=False) + weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0]) + + avg_unmasked = average(a, axis=0, + weights=weights_unmasked, returned=False) + expected_unmasked = np.array([6.0, 5.21875, 6.21875]) + assert_almost_equal(avg_unmasked, expected_unmasked) + + avg_masked = average(a, axis=0, weights=weights_masked, returned=False) + expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) + assert_almost_equal(avg_masked, expected_masked) + + # weights should be masked if needed + # depending on the array mask. This is to avoid summing + # masked nan or other values that are not cancelled by a zero + a = np.ma.array([1.0, 2.0, 3.0, 4.0], + mask=[False, False, True, True]) + avg_unmasked = average(a, weights=[1, 1, 1, np.nan]) + + assert_almost_equal(avg_unmasked, 1.5) + + a = np.ma.array([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 1.0, 2.0, 3.0], + ], mask=[ + [False, True, True, False], + [True, False, True, True], + [True, False, True, False], + ]) + + avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0) + avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5], + mask=[False, True, True, False]) + + assert_almost_equal(avg_masked, avg_expected) + assert_equal(avg_masked.mask, avg_expected.mask) + + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. 
+ assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. + a_1 = np.random.rand(5, 5) + a_2 = np.random.rand(5, 5) + m_1 = np.round(np.random.rand(5, 5), 0) + m_2 = np.round(np.random.rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5,:], b_1) + assert_array_equal(d[5:,:], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + def test_masked_constant(self): + actual = mr_[np.ma.masked, 1] + assert_equal(actual.mask, [True, False]) + assert_equal(actual.data[1], 1) + + actual = mr_[[1, 2], np.ma.masked] + assert_equal(actual.mask, [False, False, True]) + assert_equal(actual.data[:2], [1, 2]) + + +class TestNotMasked: + # Tests notmasked_edges and notmasked_contiguous. + + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions: + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3*4*5))).reshape(3, 4, 5) + m = np.zeros((3,4,5)).astype(bool) + m[1,1,1] = True + x 
= array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. 
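+        # mask_rowcols widens each masked element to its full row and/or
+        # column: axis=None masks both, axis=0 masks whole rows, and
+        # axis=1 masks whole columns, as the expected masks below show.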
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize(["func", "rowcols_axis"], + [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) + def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): + # Test deprecation of the axis argument to `mask_rows` and `mask_cols` + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + with assert_warns(DeprecationWarning): + res = func(x, axis=axis) + assert_equal(res, mask_rowcols(x, rowcols_axis)) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + 
assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) + c = dot(a, b, strict=True) + assert_equal(c.mask, + [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], + [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, + [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], + [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = 5. + c = dot(a, b, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(2), mask=[0, 1]) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[1, 0], [0, 0]]) + + def test_dot_returns_maskedarray(self): + # See gh-6611 + a = np.eye(3) + b = array(a) + assert_(type(dot(a, a)) is MaskedArray) + assert_(type(dot(a, b)) is MaskedArray) + assert_(type(dot(b, a)) is MaskedArray) + assert_(type(dot(b, b)) is MaskedArray) + + def test_dot_out(self): + a = array(np.eye(3)) + out = array(np.zeros((3, 3))) + res = dot(a, a, out=out) + assert_(res is out) + assert_equal(a, res) + + +class TestApplyAlongAxis: + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1+offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes: + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian: + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) + + def test_inf(self): 
+        # test that median handles inf values and all-masked inputs
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=-1)
+        assert_equal(r, np.inf)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=None)
+        assert_equal(r, np.inf)
+        # all masked
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=-1)
+        assert_equal(r.mask, True)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=None)
+        assert_equal(r.mask, True)
+
+    def test_non_masked(self):
+        x = np.arange(9)
+        assert_equal(np.ma.median(x), 4.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = range(8)
+        assert_equal(np.ma.median(x), 3.5)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = 5
+        assert_equal(np.ma.median(x), 5.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = np.arange(9 * 8).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(type(np.ma.median(x, axis=1)) is not MaskedArray)
+        # float
+        x = np.arange(9 * 8.).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(type(np.ma.median(x, axis=1)) is not MaskedArray)
+
+    def test_docstring_examples(self):
+        "test the examples given in the docstring of ma.median"
+        x = array(np.arange(8), mask=[0]*4 + [1]*4)
+        assert_equal(np.ma.median(x), 1.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
+        assert_equal(ma_x, [2., 5.])
+        assert_equal(ma_x.shape, (2,), "shape mismatch")
+        assert_(type(ma_x) is MaskedArray)
+
+    def test_axis_argument_errors(self):
+        msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
+        for ndmin in range(5):
+            for mask in [False, True]:
+                x = array(1, ndmin=ndmin, mask=mask)
+
+                # Valid axis values should not raise exception
+                args = itertools.product(range(-ndmin, ndmin), [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except Exception:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+                # Invalid axis values should raise exception
+                args = itertools.product([-(ndmin + 1), ndmin], [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except np.exceptions.AxisError:
+                        pass
+                    else:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+    def test_masked_0d(self):
+        # Check values
+        x = array(1, mask=False)
+        assert_equal(np.ma.median(x), 1)
+        x = array(1, mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+
+    def test_masked_1d(self):
+        x = array(np.arange(5), mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
+        x = array(np.arange(5), mask=False)
+        assert_equal(np.ma.median(x), 2.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0,1,0,0,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0,1,1,1,1]) + assert_equal(np.ma.median(x), 0.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(5), mask=[0,1,1,0,0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(5.), mask=[0,1,1,0,0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, + np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) 
+ assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, + mask=[True]*3 + [False]*4 + [True]*3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + mask = np.zeros((3, 5, 7, 11), dtype=bool) + # Randomly set some elements to True: + w = np.random.random((4, 200)) * np.array(mask.shape)[:, None] + w = w.astype(np.intp) + mask[tuple(w)] = np.nan + d = masked_array(np.ones(mask.shape), mask=mask) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = masked_array(np.empty(shape_out)) + result = median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] + assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + + def test_out_nan(self): + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 0), b) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + b[1, 2] = 
np.nan + assert_equal(np.ma.median(a, 1), b) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.ma.median(a, (0, 2)), b) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = np.ma.masked_array(a, mask=np.isnan(a)) + if inf > 0: + assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.ma.median(a), 4.5) + else: + assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.ma.median(a), -2.5) + assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=1), inf) + assert_equal(np.ma.median(a, axis=0), + ([np.nan] * i) + [inf] * j) + + def test_empty(self): + # empty arrays + a = np.ma.masked_array(np.array([], dtype=float)) + with suppress_warnings() as w: + w.record(RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # multiple dimensions + a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) + # no axis + with suppress_warnings() as w: + w.record(RuntimeWarning) + warnings.filterwarnings('always', '', RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) + assert_equal(np.ma.median(a, axis=0), b) + assert_equal(np.ma.median(a, axis=1), b) + + # axis 2 + b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.ma.masked_array(np.arange(7.)) + assert_(type(np.ma.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.ma.median(o.astype(object))), float) + + +class TestCov: + + def setup_method(self): + self.data = array(np.random.rand(12)) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, 
rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test cov 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_with_missing(self): + # Test cov on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef: + + def setup_method(self): + self.data = array(np.random.rand(12)) + self.data2 = array(np.random.rand(12)) + + def test_ddof(self): + # ddof raises DeprecationWarning + x, y = self.data, self.data2 + expected = np.corrcoef(x) + expected2 = np.corrcoef(x, y) + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof has no or negligible effect on the function + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + assert_almost_equal(corrcoef(x, ddof=-1), expected) + assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) + assert_almost_equal(corrcoef(x, ddof=3), expected) + assert_almost_equal(corrcoef(x, y, ddof=3), expected2) + + def test_bias(self): + x, y = self.data, self.data2 + expected = np.corrcoef(x) + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, y, True, False) + assert_warns(DeprecationWarning, corrcoef, x, y, True, True) + assert_warns(DeprecationWarning, corrcoef, x, bias=False) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(x, bias=1), expected) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + 
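+            # bias and ddof are deprecated no-ops for ma.corrcoef, so the
+            # results must match the plain np.corrcoef reference: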
sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], bias=1)) + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], ddof=2)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], + control[:-1, :-1]) + + +class TestPolynomial: + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + w = np.random.rand(10) + 1 + wo = w.copy() + xs = x[1:-1] + ys = y[1:-1] + ws = w[1:-1] + (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) + (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) + 
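+        # polyfit must leave the caller's weight array untouched: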
assert_equal(w, wo) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + def test_polyfit_with_masked_NaNs(self): + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + + x[0] = np.nan + y[-1,-1] = np.nan + x = x.view(MaskedArray) + y = y.view(MaskedArray) + x[0] = masked + y[-1,-1] = masked + + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + +class TestArraySetOps: + + def test_unique_onlist(self): + # Test unique on list + data = [1, 1, 1, 2, 2, 3] + test = unique(data, return_index=True, return_inverse=True) + assert_(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique_onmaskedarray(self): + # Test unique on masked data w/use_mask=True + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array(data=[1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + + def test_unique_allmasked(self): + # Test all masked + data = masked_array([1, 1, 1], mask=True) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, ], mask=[True])) + assert_equal(test[1], [0]) + assert_equal(test[2], [0, 0, 0]) + # + # Test masked + data = masked + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array(masked)) + assert_equal(test[1], [0]) + assert_equal(test[2], [0]) + + def test_ediff1d(self): + # Tests mediff1d + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) + test = ediff1d(x) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin(self): + # Test ediff1d w/ to_begin + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_begin=masked) + control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_begin=[1, 2, 3]) + control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_toend(self): + # Test ediff1d w/ to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin_toend(self): 
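+        # ediff1d returns the consecutive differences, with a difference
+        # masked whenever either of its operands is masked; to_begin and
+        # to_end prepend/append extra values (masked ones included).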
+ # Test ediff1d w/ to_begin and to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], + mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. 
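+        # isin is the N-d counterpart of in1d: each element of `a` is
+        # tested for membership in `b`, preserving the shape of `a`; the
+        # comparison with np.isin below sketches the expected equivalence
+        # on the unmasked values.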
+ a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + #compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase: + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) 
+ + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + +class TestNDEnumerate: + + def test_ndenumerate_nomasked(self): + ordinary = np.arange(6.).reshape((1, 3, 2)) + empty_mask = np.zeros_like(ordinary, dtype=bool) + with_mask = masked_array(ordinary, mask=empty_mask) + assert_equal(list(np.ndenumerate(ordinary)), + list(ndenumerate(ordinary))) + assert_equal(list(ndenumerate(ordinary)), + list(ndenumerate(with_mask))) + assert_equal(list(ndenumerate(with_mask)), + list(ndenumerate(with_mask, compressed=False))) + + def test_ndenumerate_allmasked(self): + a = masked_all(()) + b = masked_all((100,)) + c = masked_all((2, 3, 4)) + assert_equal(list(ndenumerate(a)), []) + assert_equal(list(ndenumerate(b)), []) + assert_equal(list(ndenumerate(b, compressed=False)), + list(zip(np.ndindex((100,)), 100 * [masked]))) + assert_equal(list(ndenumerate(c)), []) + assert_equal(list(ndenumerate(c, compressed=False)), + list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked]))) + + def test_ndenumerate_mixedmasked(self): + a = masked_array(np.arange(12).reshape((3, 4)), + mask=[[1, 1, 1, 1], + [1, 1, 0, 1], + [0, 0, 0, 0]]) + items = [((1, 2), 6), + ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)] + assert_equal(list(ndenumerate(a)), items) + assert_equal(len(list(ndenumerate(a, compressed=False))), a.size) + for coordinate, value in ndenumerate(a, compressed=False): + assert_equal(a[coordinate], value) + + +class TestStack: + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, 
c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/test_mrecords.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_mrecords.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd0fd0757d080dae8011c98f0bdafc428b4018a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,493 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for mrecords. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import pickle + +import numpy as np +import numpy.ma as ma +from numpy.ma import masked, nomask +from numpy.testing import temppath +from numpy._core.records import ( + recarray, fromrecords as recfromrecords, fromarrays as recfromarrays + ) +from numpy.ma.mrecords import ( + MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, + addfield + ) +from numpy.ma.testutils import ( + assert_, assert_equal, + assert_equal_records, + ) + + +class TestMRecords: + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_((mbase_last['a'] is masked)) + # as slice .......... 
+ mbase_sl = mbase[:2] + assert_(isinstance(mbase_sl, mrecarray)) + assert_equal(mbase_sl.dtype, mbase.dtype) + # Used to be mask, now it's recordmask + assert_equal(mbase_sl.recordmask, [0, 1]) + assert_equal_records(mbase_sl.mask, + np.array([(False, False, False), + (True, True, True)], + dtype=mbase._mask.dtype)) + assert_equal_records(mbase_sl, base[:2].view(mrecarray)) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) + + def test_set_fields(self): + # Tests setting fields. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase = mbase.copy() + mbase.fill_value = (999999, 1e20, 'N/A') + # Change the data, the mask should be conserved + mbase.a._data[:] = 5 + assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) + assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) + # Change the elements, and the mask will follow + mbase.a = 1 + assert_equal(mbase['a']._data, [1]*5) + assert_equal(ma.getmaskarray(mbase['a']), [0]*5) + # Used to be _mask, now it's recordmask + assert_equal(mbase.recordmask, [False]*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 0), + (0, 1, 1)], + dtype=bool)) + # Set a field to mask ........................ + mbase.c = masked + # Used to be mask, and now it's still mask ! + assert_equal(mbase.c.mask, [1]*5) + assert_equal(mbase.c.recordmask, [1]*5) + assert_equal(ma.getmaskarray(mbase['c']), [1]*5) + assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 1), + (0, 1, 1), + (0, 0, 1), + (0, 0, 1), + (0, 1, 1)], + dtype=bool)) + # Set fields by slices ....................... + mbase = base.view(mrecarray).copy() + mbase.a[3:] = 5 + assert_equal(mbase.a, [1, 2, 3, 5, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) + mbase.b[3:] = masked + assert_equal(mbase.b, base['b']) + assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) + # Set fields globally.......................... + ndtype = [('alpha', '|S1'), ('num', int)] + data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) + rdata = data.view(MaskedRecords) + val = ma.array([10, 20, 30], mask=[1, 0, 0]) + + rdata['num'] = val + assert_equal(rdata.num, val) + assert_equal(rdata.num.mask, [1, 0, 0]) + + def test_set_fields_mask(self): + # Tests setting the mask of a field. + base = self.base.copy() + # This one already has a mask.... + mbase = base.view(mrecarray) + mbase['a'][-2] = masked + assert_equal(mbase.a, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) + # This one does not have one yet + mbase = fromarrays([np.arange(5), np.random.rand(5)], + dtype=[('a', int), ('b', float)]) + mbase['a'][-2] = masked + assert_equal(mbase.a, [0, 1, 2, 3, 4]) + assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) + + def test_set_mask(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Set the mask to True ....................... + mbase.mask = masked + assert_equal(ma.getmaskarray(mbase['b']), [1]*5) + assert_equal(mbase['a']._mask, mbase['b']._mask) + assert_equal(mbase['a']._mask, mbase['c']._mask) + assert_equal(mbase._mask.tolist(), + np.array([(1, 1, 1)]*5, dtype=bool)) + # Delete the mask ............................
+ mbase.mask = nomask + assert_equal(ma.getmaskarray(mbase['c']), [0]*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0)]*5, dtype=bool)) + + def test_set_mask_fromarray(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Sets the mask w/ an array + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) + # Yay, once more ! + mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... 
+ pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + assert_(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + assert_(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... + assert_equal_records(mbase._mask, + ma.make_mask_none(base.shape, base.dtype)) + assert_(ma.make_mask(mbase['b']._mask) is nomask) + assert_equal(mbase['a']._mask, mbase['b']._mask) + + def test_pickling(self): + # Test pickling + base = self.base.copy() + mrec = base.view(mrecarray) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + _ = pickle.dumps(mrec, protocol=proto) + mrec_ = pickle.loads(_) + assert_equal(mrec_.dtype, mrec.dtype) + assert_equal_records(mrec_._data, mrec._data) + assert_equal(mrec_._mask, mrec._mask) + assert_equal_records(mrec_._mask, mrec._mask) + + def test_filled(self): + # Test filling the array + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + mrecfilled = mrec.filled() + assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) + assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), + dtype=float)) + assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), + dtype='|S8')) + + def test_tolist(self): + # Test tolist. + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + + assert_equal(mrec.tolist(), + [(1, 1.1, None), (2, 2.2, b'two'), + (None, None, b'three')]) + + def test_withnames(self): + # Test the creation w/ format and names + x = mrecarray(1, formats=float, names='base') + x[0]['base'] = 10 + assert_equal(x['base'][0], 10) + + def test_exotic_formats(self): + # Test that 'exotic' formats are processed properly + easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) + easy[0] = masked + assert_equal(easy.filled(1).item(), (1, b'1', 1.)) + + solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))]) [...] + if len(s) > 1: + assert_(eq(np.concatenate((x, y), 1), + concatenate((xm, ym), 1))) + assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) + assert_(eq(np.sum(x, 1), sum(x, 1))) + assert_(eq(np.prod(x, 1), product(x, 1))) + + def test_testCI(self): + # Test of conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises?
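# Illustrative sketch, not part of the vendored diff: the indexing assertions
# that follow rely on two behaviors -- scalar access to a masked slot returns
# the `ma.masked` singleton, and ma.sort() orders by whatever fill_value is
# substituted for the masked entries. Mirrors the x2 fixture defined above.
import numpy.ma as ma

x = ma.array([1, 2, 4, 3], mask=[1, 0, 0, 0])
print(x[0] is ma.masked)         # True: masked scalar access yields the singleton
print(ma.sort(x, fill_value=0))  # [-- 2 3 4]: the masked slot sorts as if it were 0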
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0))) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_(eq(x1[2], x2[2])) + assert_(eq(x1[2:5], x2[2:5])) + assert_(eq(x1[:], x2[:])) + assert_(eq(x1[1:], x3[1:])) + x1[2] = 9 + x2[2] = 9 + assert_(eq(x1, x2)) + x1[1:3] = 99 + x2[1:3] = 99 + assert_(eq(x1, x2)) + x2[1] = masked + assert_(eq(x1, x2)) + x2[1:3] = masked + assert_(eq(x1, x2)) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_(eq(x1, x2)) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_(eq(3.0, x2.fill_value)) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_testCopySize(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1._mask is m) + + y1a = array(y1, copy=0) + # For copy=False, one might expect that the array would just be + # passed on, i.e., that it would be "is" instead of "==". + # See gh-4043 for discussion. + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2._mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a._mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x._mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x._mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is
not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, 
masked, 1) + assert_(eq(z, [99, 99, 99, 1, 1, 1])) + z = where(c, 1, masked) + assert_(eq(z, [99, 1, 1, 99, 99, 99])) + + def test_testMinMax2(self): + # Test of minimum, maximum. + assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) + assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_(eq(minimum(x, y), where(less(x, y), x, y))) + assert_(eq(maximum(x, y), where(greater(x, y), x, y))) + assert_(minimum.reduce(x) == 0) + assert_(maximum.reduce(x) == 4) + + def test_testTakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) + assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) + assert_(eq(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y))) + assert_(eq(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y))) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_testInplace(self): + # Test of inplace operations and rich comparisons + y = arange(10) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x += 1 + assert_(eq(x, y + 1)) + xm += 1 + assert_(eq(x, y + 1)) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x -= 1 + assert_(eq(x, y - 1)) + xm -= 1 + assert_(eq(xm, y - 1)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x *= 2.0 + assert_(eq(x, y * 2)) + xm *= 2.0 + assert_(eq(xm, y * 2)) + + x = arange(10) * 2 + xm = arange(10) + xm[2] = masked + x //= 2 + assert_(eq(x, y)) + xm //= 2 + assert_(eq(x, y)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x /= 2.0 + assert_(eq(x, y / 2.0)) + xm /= arange(10) + assert_(eq(xm, ones((10,)))) + + x = arange(10).astype(np.float32) + xm = arange(10) + xm[2] = masked + x += 1. + assert_(eq(x, y + 1.)) + + def test_testPickle(self): + # Test of pickling + x = arange(12) + x[4:10:2] = masked + x = x.reshape(4, 3) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + s = pickle.dumps(x, protocol=proto) + y = pickle.loads(s) + assert_(eq(x, y)) + + def test_testMasked(self): + # Test of masked element + xx = arange(6) + xx[1] = masked + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) + + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_(eq(2.0, result)) + assert_(wts == 4.0) + ott[:] = masked + assert_(average(ott, axis=0) is masked) + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) + result, wts = average(ott, axis=0, returned=True) + assert_(eq(wts, [1., 0.])) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + assert_(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. 
/ 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. 
/ 3.)) + a2dma = average(a2dm, axis=1) + assert_(eq(a2dma, [1.5, 4.0])) + + def test_testToPython(self): + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) + + def test_testScalarArithmetic(self): + xm = array(0, mask=1) + # TODO/FIXME: Find out why the following raises a warning in r8247 + with np.errstate(divide='ignore'): + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) + x = array(0, mask=0) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) + + def test_testArrayMethods(self): + a = array([1, 3, 2]) + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), + a._data.choose(0, 1, 2, 3, 4))) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) + m = array([[1, 2], [3, 4]]) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) + + def test_testArrayAttributes(self): + a = array([1, 3, 2]) + assert_equal(a.ndim, 1) + + def test_testAPI(self): + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) + + def test_testSingleElementSubscript(self): + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_assignment_by_condition(self): + # Test for gh-18951 + a = array([1, 2, 3, 4], mask=[1, 0, 1, 0]) + c = a >= 3 + a[c] = 5 + assert_(a[2] is masked) + + def test_assignment_by_condition_2(self): + # gh-19721 + a = masked_array([0, 1], mask=[False, False]) + b = masked_array([0, 1], mask=[True, True]) + mask = a < 1 + b[mask] = a[mask] + expected_mask = [False, True] + assert_equal(b.mask, expected_mask) + + +class TestUfuncs: + def setup_method(self): + self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) + + def test_testUfuncRegression(self): + f_invalid_ignore = [ + 'sqrt', 'arctanh', 'arcsin', 'arccos', + 'arccosh', 'log', 'log10', 'divide', + 'true_divide', 'floor_divide', 'remainder', 'fmod'] + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', + 'sin', 'cos', 'tan', + 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', + 'arcsinh', + 'arccosh', + 'arctanh', + 'absolute', 'fabs', 'negative', + 'floor', 'ceil', + 'logical_not', + 'add', 'subtract', 'multiply', + 'divide', 'true_divide', 'floor_divide', + 'remainder', 'fmod', 'hypot', 'arctan2', + 'equal', 'not_equal', 'less_equal', 'greater_equal', + 'less', 'greater', + 'logical_and', 'logical_or', 'logical_xor']: + try: + uf = getattr(umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(np.ma, f) + args = self.d[:uf.nin] + with np.errstate(): + if f in f_invalid_ignore: + np.seterr(invalid='ignore') + if f in ['arctanh', 'log', 'log10']: +
np.seterr(divide='ignore') + ur = uf(*args) + mr = mf(*args) + assert_(eq(ur.filled(0), mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) + + def test_reduce(self): + a = self.d[0] + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods: + + def setup_method(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + # print(type(mx), mx.compressed()) + # raise Exception() + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, np.float64) + cols = np.zeros(m, np.float64) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, 
m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + return m1 is nomask + return (m1 == m2).all() diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/test_regression.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..f72f1b547b303982570e4d6c0f2bd2fb00429c50 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_regression.py @@ -0,0 +1,97 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_array_equal, assert_allclose, suppress_warnings + ) + + +class TestRegression: + def test_masked_array_create(self): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self): + # Ticket #62 + from numpy.ma import masked_where, MaskType + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a-c + + def test_masked_array_multiply(self): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a*b + b*a + + def test_masked_array_repeat(self): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array("Unicode")) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_ddof_corrcoef(self): + # See gh-3336 + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) + # this test can be removed after deprecation. + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + r0 = np.ma.corrcoef(x, y, ddof=0) + r1 = np.ma.corrcoef(x, y, ddof=1) + # ddof should not have an effect (it gets cancelled out) + assert_allclose(r0.data, r1.data) + + def test_mask_not_backmangled(self): + # See gh-10314. Test case taken from gh-3140. + a = np.ma.MaskedArray([1., 2.], mask=[False, False]) + assert_(a.mask.shape == (2,)) + b = np.tile(a, (2, 1)) + # Check that the above no longer changes a.shape to (1, 2) + assert_(a.mask.shape == (2,)) + assert_(b.shape == (2, 2)) + assert_(b.mask.shape == (2, 2)) + + def test_empty_list_on_structured(self): + # See gh-12464. Indexing with empty list should give empty result. 
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') + assert_array_equal(ma[[]], ma[:0]) + + def test_masked_array_tobytes_fortran(self): + ma = np.ma.arange(4).reshape((2,2)) + assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) + + def test_structured_array(self): + # see gh-22041 + np.ma.array((1, (b"", b"")), + dtype=[("x", np.int_), + ("y", [("i", np.void), ("j", np.void)])]) diff --git a/phivenv/Lib/site-packages/numpy/ma/tests/test_subclassing.py b/phivenv/Lib/site-packages/numpy/ma/tests/test_subclassing.py new file mode 100644 index 0000000000000000000000000000000000000000..cb9a31d56e4bb02a88490ddb365635621286753b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,460 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +import numpy as np +from numpy.lib.mixins import NDArrayOperatorsMixin +from numpy.testing import assert_, assert_raises +from numpy.ma.testutils import assert_equal +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, log, add, hypot, + divide, asarray, asanyarray, nomask + ) +# from numpy.ma.core import ( + +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls,arr,info={}): + x = np.asanyarray(arr).view(cls) + x.info = info.copy() + return x + + def __array_finalize__(self, obj): + super().__array_finalize__(obj) + self.info = getattr(obj, 'info', {}).copy() + return + + def __add__(self, other): + result = super().__add__(other) + result.info['added'] = result.info.get('added', 0) + 1 + return result + + def __iadd__(self, other): + result = super().__iadd__(other) + result.info['iadded'] = result.info.get('iadded', 0) + 1 + return result + + +subarray = SubArray + + +class SubMaskedArray(MaskedArray): + """Pure subclass of MaskedArray, keeping some info on subclass.""" + def __new__(cls, info=None, **kwargs): + obj = super().__new__(cls, **kwargs) + obj._optinfo['info'] = info + return obj + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + +msubarray = MSubArray + + +# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +# and overrides __array_wrap__, updating the info dict, to check that this +# doesn't get destroyed by MaskedArray._update_from. But this one also needs +# its own iterator... 
+class CSAIterator: + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return f'myprefix {self.view(SubArray)} mypostfix' + + def __repr__(self): + # Return a repr that does not start with 'name(' + return f'<{self.__class__.__name__} {self}>' + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super().__setitem__(item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super().__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None, return_scalar=False): + obj = super().__array_wrap__(obj, context, return_scalar) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class WrappedArray(NDArrayOperatorsMixin): + """ + Wrapping a MaskedArray rather than subclassing to test that + ufunc deferrals are commutative. + See: https://github.com/numpy/numpy/issues/15200) + """ + __slots__ = ('_array', 'attrs') + __array_priority__ = 20 + + def __init__(self, array, **attrs): + self._array = array + self.attrs = attrs + + def __repr__(self): + return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)" + + def __array__(self, dtype=None, copy=None): + return np.asarray(self._array) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == '__call__': + inputs = [arg._array if isinstance(arg, self.__class__) else arg + for arg in inputs] + return self.__class__(ufunc(*inputs, **kwargs), **self.attrs) + else: + return NotImplemented + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup_method(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0]+[1]*4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my+1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym+1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name':'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = [(i, j) for (i, j) in zip(x, m)] + xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 -- 3 
4]') + + xcsub = ComplicatedSubArray(x) + assert_raises(ValueError, xcsub.__setitem__, 0, + np.ma.core.masked_print_option) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') + + def test_pure_subclass_info_preservation(self): + # Test that ufuncs and methods conserve extra information consistently; + # see gh-7122. + arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) + arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) + diff1 = np.subtract(arr1, arr2) + assert_('info' in diff1._optinfo) + assert_(diff1._optinfo['info'] == 'test') + diff2 = arr1 - arr2 + assert_('info' in diff2._optinfo) + assert_(diff2._optinfo['info'] == 'test') + + +class ArrayNoInheritance: + """Quantity-like class that does not inherit from ndarray""" + def __init__(self, data, units): + self.magnitude = data + self.units = units + + def __getattr__(self, attr): + return getattr(self.magnitude, attr) + + +def test_array_no_inheritance(): + data_masked = np.ma.array([1, 2, 3], mask=[True, False, True]) + data_masked_units = ArrayNoInheritance(data_masked, 'meters') + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test sharing the mask + data_masked.mask = [True, False, False] + assert_equal(data_masked.mask, new_array.mask) + assert_(new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, copy=True) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test that the mask is not shared when copy=True + data_masked.mask = [True, False, True] + assert_equal([True, False, False], new_array.mask) + assert_(not new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, keep_mask=False) + assert_equal(data_masked.data, new_array.data) + # The change did not affect the original mask + assert_equal(data_masked.mask, [True, False, True]) + # Test that the mask is False and not shared when keep_mask=False + assert_(not new_array.mask) + assert_(not new_array.sharedmask) + + +class TestClassWrapping: + # Test suite for classes that wrap MaskedArrays + + def setup_method(self): + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + self.data = (m, wm) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (m, wm) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(np.log(wm), WrappedArray)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (m, wm) = self.data + # Result should be a WrappedArray + assert_(isinstance(np.add(wm, wm), WrappedArray)) + assert_(isinstance(np.add(m, wm), WrappedArray)) + assert_(isinstance(np.add(wm, m), WrappedArray)) + # add and '+' should call the same ufunc + assert_equal(np.add(m, wm), m + wm) + assert_(isinstance(np.hypot(m, wm), WrappedArray)) + assert_(isinstance(np.hypot(wm, m), WrappedArray)) + # Test domained binary operations + assert_(isinstance(np.divide(wm, m), WrappedArray)) + assert_(isinstance(np.divide(m, wm), WrappedArray)) + assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm) + # Test broadcasting + m2 = np.stack([m, m]) + assert_(isinstance(np.divide(wm, m2), WrappedArray)) + assert_(isinstance(np.divide(m2, wm), WrappedArray)) + 
assert_equal(np.divide(m2, wm), np.divide(wm, m2)) + + def test_mixins_have_slots(self): + mixin = NDArrayOperatorsMixin() + # Should raise an error + assert_raises(AttributeError, mixin.__setattr__, "not_a_real_attr", 1) + + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + assert_raises(AttributeError, wm.__setattr__, "not_an_attr", 2) diff --git a/phivenv/Lib/site-packages/numpy/ma/testutils.py b/phivenv/Lib/site-packages/numpy/ma/testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..36a67653dcc2106b982b979ef7df6231adbf544e --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/testutils.py @@ -0,0 +1,292 @@ +"""Miscellaneous functions for testing masked arrays and subclasses. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ + +""" +import operator + +import numpy as np +from numpy import ndarray +import numpy._core.umath as umath +import numpy.testing +from numpy.testing import ( + assert_, assert_allclose, assert_array_almost_equal_nulp, + assert_raises, build_err_msg + ) +from .core import mask_or, getmask, masked_array, nomask, masked, filled + +__all__masked = [ + 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', + 'assert_array_approx_equal', 'assert_array_compare', + 'assert_array_equal', 'assert_array_less', 'assert_close', + 'assert_equal', 'assert_equal_records', 'assert_mask_equal', + 'assert_not_equal', 'fail_if_array_equal', + ] + +# Include some normal test functions to avoid breaking other projects that +# have mistakenly included them from this file. SciPy is one. That is +# unfortunate, as some of these functions are not intended to work with +# masked arrays. But there was no way to tell before. +from unittest import TestCase +__some__from_testing = [ + 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', + 'assert_raises' + ] + +__all__ = __all__masked + __some__from_testing + + +def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): + """ + Returns true if all components of a and b are equal to the given tolerances. + + If fill_value is True, masked values are considered equal. Otherwise, + masked values are considered unequal. The relative error rtol should + be positive and << 1.0. The absolute error atol comes into play for + those elements of b that are very small or zero; it says how small a + must be also. + + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled( + masked_array(d1, copy=False, mask=m), fill_value + ).astype(np.float64) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64) + d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) + return d.ravel() + + +def almost(a, b, decimal=6, fill_value=True): + """ + Returns True if a and b are equal up to the given number of decimal places. + + If fill_value is True, masked values are considered equal. Otherwise, + masked values are considered unequal.
+ + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled( + masked_array(d1, copy=False, mask=m), fill_value + ).astype(np.float64) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64) + d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) + return d.ravel() + + +def _assert_equal_on_sequences(actual, desired, err_msg=''): + """ + Asserts the equality of two non-array sequences. + + """ + assert_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + + +def assert_equal_records(a, b): + """ + Asserts that two records are equal. + + Pretty crude for now. + + """ + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) + if not (af is masked) and not (bf is masked): + assert_equal(operator.getitem(a, f), operator.getitem(b, f)) + return + + +def assert_equal(actual, desired, err_msg=''): + """ + Asserts that two items are equal. + + """ + # Case #1: dictionary ..... + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(f"{k} not in {actual}") + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + # Case #2: lists ..... + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + return _assert_equal_on_sequences(actual, desired, err_msg='') + if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return + # Case #4. arrays or equivalent + if ((actual is masked) and not (desired is masked)) or \ + ((desired is masked) and not (actual is masked)): + msg = build_err_msg([actual, desired], + err_msg, header='', names=('x', 'y')) + raise ValueError(msg) + actual = np.asanyarray(actual) + desired = np.asanyarray(desired) + (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) + if actual_dtype.char == "S" and desired_dtype.char == "S": + return _assert_equal_on_sequences(actual.tolist(), + desired.tolist(), + err_msg='') + return assert_array_equal(actual, desired, err_msg) + + +def fail_if_equal(actual, desired, err_msg='',): + """ + Raises an assertion error if two items are equal. + + """ + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + fail_if_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + fail_if_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return fail_if_array_equal(actual, desired, err_msg) + msg = build_err_msg([actual, desired], err_msg) + if not desired != actual: + raise AssertionError(msg) + + +assert_not_equal = fail_if_equal + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Asserts that two items are almost equal. 
+ + The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal). + + """ + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return assert_array_almost_equal(actual, desired, decimal=decimal, + err_msg=err_msg, verbose=verbose) + msg = build_err_msg([actual, desired], + err_msg=err_msg, verbose=verbose) + if not round(abs(desired - actual), decimal) == 0: + raise AssertionError(msg) + + +assert_close = assert_almost_equal + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + fill_value=True): + """ + Asserts that the comparison between two masked arrays is satisfied. + + The comparison is elementwise. + + """ + # Allocate a common mask and refill + m = mask_or(getmask(x), getmask(y)) + x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) + y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) + if ((x is masked) and not (y is masked)) or \ + ((y is masked) and not (x is masked)): + msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, + header=header, names=('x', 'y')) + raise ValueError(msg) + # OK, now run the basic tests on filled versions + return np.testing.assert_array_compare(comparison, + x.filled(fill_value), + y.filled(fill_value), + err_msg=err_msg, + verbose=verbose, header=header) + + +def assert_array_equal(x, y, err_msg='', verbose=True): + """ + Checks the elementwise equality of two masked arrays. + + """ + assert_array_compare(operator.__eq__, x, y, + err_msg=err_msg, verbose=verbose, + header='Arrays are not equal') + + +def fail_if_array_equal(x, y, err_msg='', verbose=True): + """ + Raises an assertion error if two masked arrays are equal elementwise. + + """ + def compare(x, y): + return (not np.all(approx(x, y))) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not equal') + + +def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Checks the equality of two masked arrays, up to the given number of decimals. + + The equality is checked elementwise. + + """ + def compare(x, y): + "Returns the result of the loose comparison between x and y." + return approx(x, y, rtol=10. ** -decimal) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not almost equal') + + +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Checks the equality of two masked arrays, up to the given number of decimals. + + The equality is checked elementwise. + + """ + def compare(x, y): + "Returns the result of the loose comparison between x and y." + return almost(x, y, decimal) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not almost equal') + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Checks that x is smaller than y elementwise. + + """ + assert_array_compare(operator.__lt__, x, y, + err_msg=err_msg, verbose=verbose, + header='Arrays are not less-ordered') + + +def assert_mask_equal(m1, m2, err_msg=''): + """ + Asserts the equality of two masks.
+ + """ + if m1 is nomask: + assert_(m2 is nomask) + if m2 is nomask: + assert_(m1 is nomask) + assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/phivenv/Lib/site-packages/numpy/ma/timer_comparison.py b/phivenv/Lib/site-packages/numpy/ma/timer_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..e39133e2521d35dbbdfa725ce1ed7476ff02af46 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/ma/timer_comparison.py @@ -0,0 +1,442 @@ +import timeit +from functools import reduce + +import numpy as np +import numpy._core.fromnumeric as fromnumeric + +from numpy.testing import build_err_msg + + +pi = np.pi + +class ModuleTester: + def __init__(self, module): + self.module = module + self.allequal = module.allequal + self.arange = module.arange + self.array = module.array + self.concatenate = module.concatenate + self.count = module.count + self.equal = module.equal + self.filled = module.filled + self.getmask = module.getmask + self.getmaskarray = module.getmaskarray + self.id = id + self.inner = module.inner + self.make_mask = module.make_mask + self.masked = module.masked + self.masked_array = module.masked_array + self.masked_values = module.masked_values + self.mask_or = module.mask_or + self.nomask = module.nomask + self.ones = module.ones + self.outer = module.outer + self.repeat = module.repeat + self.resize = module.resize + self.sort = module.sort + self.take = module.take + self.transpose = module.transpose + self.zeros = module.zeros + self.MaskType = module.MaskType + try: + self.umath = module.umath + except AttributeError: + self.umath = module.core.umath + self.testnames = [] + + def assert_array_compare(self, comparison, x, y, err_msg='', header='', + fill_value=True): + """ + Assert that a comparison of two masked arrays is satisfied elementwise. + + """ + xf = self.filled(x) + yf = self.filled(y) + m = self.mask_or(self.getmask(x), self.getmask(y)) + + x = self.filled(self.masked_array(xf, mask=m), fill_value) + y = self.filled(self.masked_array(yf, mask=m), fill_value) + if (x.dtype.char != "O"): + x = x.astype(np.float64) + if isinstance(x, np.ndarray) and x.size > 1: + x[np.isnan(x)] = 0 + elif np.isnan(x): + x = 0 + if (y.dtype.char != "O"): + y = y.astype(np.float64) + if isinstance(y, np.ndarray) and y.size > 1: + y[np.isnan(y)] = 0 + elif np.isnan(y): + y = 0 + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + f'\n(shapes {x.shape}, {y.shape} mismatch)', + header=header, + names=('x', 'y')) + assert cond, msg + val = comparison(x, y) + if m is not self.nomask and fill_value: + val = self.masked_array(val, mask=m) + if isinstance(val, bool): + cond = val + reduced = [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + header=header, + names=('x', 'y')) + assert cond, msg + except ValueError as e: + msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) + raise ValueError(msg) from e + + def assert_array_equal(self, x, y, err_msg=''): + """ + Checks the elementwise equality of two masked arrays. 
+ + """ + self.assert_array_compare(self.equal, x, y, err_msg=err_msg, + header='Arrays are not equal') + + @np.errstate(all='ignore') + def test_0(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + xm = self.masked_array(x, mask=m) + xm[0] + + @np.errstate(all='ignore') + def test_1(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = self.masked_array(x, mask=m1) + ym = self.masked_array(y, mask=m2) + xf = np.where(m1, 1.e+20, x) + xm.set_fill_value(1.e+20) + + assert((xm-ym).filled(0).any()) + s = x.shape + assert(xm.size == reduce(lambda x, y:x*y, s)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + @np.errstate(all='ignore') + def test_2(self): + """ + Tests conversions and indexing. + + """ + x1 = np.array([1, 2, 4, 3]) + x2 = self.array(x1, mask=[1, 0, 0, 0]) + x3 = self.array(x1, mask=[0, 1, 0, 1]) + x4 = self.array(x1) + # test conversion to strings, no errors + str(x2) + repr(x2) + # tests of indexing + assert type(x2[1]) is type(x1[1]) + assert x1[1] == x2[1] + x1[2] = 9 + x2[2] = 9 + self.assert_array_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + x2[1] = self.masked + x2[1:3] = self.masked + x2[:] = x1 + x2[1] = self.masked + x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x1 = np.arange(5)*1.0 + x2 = self.masked_values(x1, 3.0) + x1 = self.array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + # check that no error occurs. + x1[1] + x2[1] + assert x1[1:1].shape == (0,) + # Tests copy-size + n = [0, 0, 1, 0, 0] + m = self.make_mask(n) + m2 = self.make_mask(m) + assert(m is m2) + m3 = self.make_mask(m, copy=1) + assert(m is not m3) + + @np.errstate(all='ignore') + def test_3(self): + """ + Tests resize/repeat + + """ + x4 = self.arange(4) + x4[2] = self.masked + y4 = self.resize(x4, (8,)) + assert self.allequal(self.concatenate([x4, x4]), y4) + assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) + self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = self.repeat(x4, 2, axis=0) + assert self.allequal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert self.allequal(y5, y7) + y8 = x4.repeat(2, 0) + assert self.allequal(y5, y8) + + @np.errstate(all='ignore') + def test_4(self): + """ + Test of take, transpose, inner, outer products. 
+ + """ + x = self.arange(24) + y = np.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) + assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) + assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), + self.inner(x, y)) + assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), + self.outer(x, y)) + y = self.array(['abc', 1, 'def', 2, 3], object) + y[2] = self.masked + t = self.take(y, [0, 3, 4]) + assert t[0] == 'abc' + assert t[1] == 2 + assert t[2] == 3 + + @np.errstate(all='ignore') + def test_5(self): + """ + Tests inplace w/ scalar + + """ + x = self.arange(10) + y = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x += 1 + assert self.allequal(x, y+1) + xm += 1 + assert self.allequal(xm, y+1) + + x = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x -= 1 + assert self.allequal(x, y-1) + xm -= 1 + assert self.allequal(xm, y-1) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x *= 2.0 + assert self.allequal(x, y*2) + xm *= 2.0 + assert self.allequal(xm, y*2) + + x = self.arange(10)*2 + xm = self.arange(10)*2 + xm[2] = self.masked + x /= 2 + assert self.allequal(x, y) + xm /= 2 + assert self.allequal(xm, y) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x /= 2.0 + assert self.allequal(x, y/2.0) + xm /= self.arange(10) + self.assert_array_equal(xm, self.ones((10,))) + + x = self.arange(10).astype(np.float64) + xm = self.arange(10) + xm[2] = self.masked + x += 1. + assert self.allequal(x, y + 1.) + + @np.errstate(all='ignore') + def test_6(self): + """ + Tests inplace w/ array + + """ + x = self.arange(10, dtype=np.float64) + y = self.arange(10) + xm = self.arange(10, dtype=np.float64) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=np.float64) + a[-1] = self.masked + x += a + xm += a + assert self.allequal(x, y+a) + assert self.allequal(xm, y+a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=np.float64) + xm = self.arange(10, dtype=np.float64) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=np.float64) + a[-1] = self.masked + x -= a + xm -= a + assert self.allequal(x, y-a) + assert self.allequal(xm, y-a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=np.float64) + xm = self.arange(10, dtype=np.float64) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=np.float64) + a[-1] = self.masked + x *= a + xm *= a + assert self.allequal(x, y*a) + assert self.allequal(xm, y*a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=np.float64) + xm = self.arange(10, dtype=np.float64) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=np.float64) + a[-1] = self.masked + x /= a + xm /= a + + @np.errstate(all='ignore') + def test_7(self): + "Tests ufunc" + d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), + self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', +# 'sin', 'cos', 'tan', +# 'arcsin', 'arccos', 'arctan', +# 'sinh', 'cosh', 'tanh', +# 'arcsinh', +# 'arccosh', +# 'arctanh', +# 'absolute', 'fabs', 'negative', +# # 'nonzero', 'around', +# 'floor', 'ceil', +# # 'sometrue', 'alltrue', +# 'logical_not', +# 'add', 'subtract', 'multiply', +# 'divide', 'true_divide', 'floor_divide', +# 'remainder', 'fmod', 'hypot', 
'arctan2', +# 'equal', 'not_equal', 'less_equal', 'greater_equal', +# 'less', 'greater', +# 'logical_and', 'logical_or', 'logical_xor', + ]: + try: + uf = getattr(self.umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(self.module, f) + args = d[:uf.nin] + ur = uf(*args) + mr = mf(*args) + self.assert_array_equal(ur.filled(0), mr.filled(0), f) + self.assert_array_equal(ur._mask, mr._mask) + + @np.errstate(all='ignore') + def test_99(self): + # test average + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + self.assert_array_equal(2.0, self.average(ott, axis=0)) + self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) + result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) + self.assert_array_equal(2.0, result) + assert(wts == 4.0) + ott[:] = self.masked + assert(self.average(ott, axis=0) is self.masked) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = self.masked + self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) + assert(self.average(ott, axis=1)[0] is self.masked) + self.assert_array_equal([2., 0.], self.average(ott, axis=0)) + result, wts = self.average(ott, axis=0, returned=1) + self.assert_array_equal(wts, [1., 0.]) + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = self.arange(6) + self.assert_array_equal(self.average(x, axis=0), 2.5) + self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) + y = self.array([self.arange(6), 2.0*self.arange(6)]) + self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) + self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) + self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + m1 = self.zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = self.ones(6) + m5 = [0, 1, 1, 1, 1, 1] + self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) + self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) + z = self.masked_array(y, m3) + self.assert_array_equal(self.average(z, None), 20./6.) 
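+ # (Editor's annotation, not upstream code:) columns 2 and 3 of z are + # fully masked, so average() yields masked entries there; the 99. values + # in the expected arrays below are placeholders that the comparison never + # checks, because those positions stay masked.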
+ self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) + self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) + + @np.errstate(all='ignore') + def test_A(self): + x = self.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + + +if __name__ == '__main__': + setup_base = ("from __main__ import ModuleTester \n" + "import numpy\n" + "tester = ModuleTester(module)\n") + setup_cur = "import numpy.ma.core as module\n" + setup_base + (nrepeat, nloop) = (10, 10) + + for i in range(1, 8): + func = 'tester.test_%i()' % i + cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) + cur = np.sort(cur) + print("#%i" % i + 50*'.') + print(eval("ModuleTester.test_%i.__doc__" % i)) + print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}') diff --git a/phivenv/Lib/site-packages/numpy/matlib.py b/phivenv/Lib/site-packages/numpy/matlib.py new file mode 100644 index 0000000000000000000000000000000000000000..807f5be844bed5be9118323b7ad922029f96c779 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matlib.py @@ -0,0 +1,381 @@ +import warnings + +# 2018-05-29, PendingDeprecationWarning added to matrix.__new__ +# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning +warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. " + "The matrix subclass is not the recommended way to represent " + "matrices or deal with linear algebra (see " + "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). " + "Please adjust your code to use regular ndarray. ", + PendingDeprecationWarning, stacklevel=2) + +import numpy as np +from numpy.matrixlib.defmatrix import matrix, asmatrix +# Matlib.py contains all functions in the numpy namespace with a few +# replacements. See doc/source/reference/routines.matlib.rst for details. +# Need * as we're copying the numpy namespace. +from numpy import * # noqa: F403 + +__version__ = np.__version__ + +__all__ = np.__all__[:] # copy numpy namespace +__all__ += ['rand', 'randn', 'repmat'] + +def empty(shape, dtype=None, order='C'): + """Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + See Also + -------- + numpy.empty : Equivalent array function. + matlib.zeros : Return a matrix of zeros. + matlib.ones : Return a matrix of ones. + + Notes + ----- + Unlike other matrix creation functions (e.g. `matlib.zeros`, + `matlib.ones`), `matlib.empty` does not initialize the values of the + matrix, and may therefore be marginally faster. However, the values + stored in the newly allocated matrix are arbitrary. For reproducible + behavior, be sure to set each element of the matrix before reading. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones.
+ + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[1., 1., 1.], + [1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[0., 0., 0.], + [0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n,dtype=None): + """ + Returns the square identity matrix of given size. + + Parameters + ---------- + n : int + Size of the returned identity matrix. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : matrix + `n` x `n` matrix with its main diagonal set to one, + and all other elements zero. + + See Also + -------- + numpy.identity : Equivalent array function. + matlib.eye : More general matrix identity function. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.identity(3, dtype=int) + matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + + """ + a = array([1]+n*[0], dtype=dtype) + b = empty((n, n), dtype=dtype) + b.flat = a + return b + +def eye(n,M=None, k=0, dtype=float, order='C'): + """ + Return a matrix with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + n : int + Number of rows in the output. + M : int, optional + Number of columns in the output, defaults to `n`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, + a positive value refers to an upper diagonal, + and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned matrix. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + + Returns + ------- + I : matrix + A `n` x `M` matrix where all elements are equal to zero, + except for the `k`-th diagonal, whose values are equal to one. + + See Also + -------- + numpy.eye : Equivalent array function. + identity : Square identity matrix. 
+ + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.eye(3, k=1, dtype=float) + matrix([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) + +def rand(*args): + """ + Return a matrix of random values with given shape. + + Create a matrix of the given shape and populate it with + random samples from a uniform distribution over ``[0, 1)``. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. + If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + out : ndarray + The matrix of random values with shape given by `\\*args`. + + See Also + -------- + randn, numpy.random.RandomState.rand + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.rand(2, 3) + matrix([[0.69646919, 0.28613933, 0.22685145], + [0.55131477, 0.71946897, 0.42310646]]) + >>> np.matlib.rand((2, 3)) + matrix([[0.9807642 , 0.68482974, 0.4809319 ], + [0.39211752, 0.34317802, 0.72904971]]) + + If the first argument is a tuple, other arguments are ignored: + + >>> np.matlib.rand((2, 3), 4) + matrix([[0.43857224, 0.0596779 , 0.39804426], + [0.73799541, 0.18249173, 0.17545176]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.rand(*args)) + +def randn(*args): + """ + Return a random matrix with data from the "standard normal" distribution. + + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, numpy.random.RandomState.randn + + Notes + ----- + For random samples from the normal distribution with mean ``mu`` and + standard deviation ``sigma``, use:: + + sigma * np.matlib.randn(...) + mu + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-1.0856306]]) + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) + + Two-by-four matrix of samples from the normal distribution with + mean 3 and standard deviation 2.5: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`.
+ + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/__init__.py b/phivenv/Lib/site-packages/numpy/matrixlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51e1cab8c14850f4c1258af508ec8c7d412e42c7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/__init__.py @@ -0,0 +1,11 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from . import defmatrix +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/__init__.pyi b/phivenv/Lib/site-packages/numpy/matrixlib/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f967337676c95296e3243e40f1eeb5127eb270b3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/__init__.pyi @@ -0,0 +1,14 @@ +from numpy._pytesttester import PytestTester + +from numpy import ( + matrix as matrix, +) + +from numpy.matrixlib.defmatrix import ( + bmat as bmat, + mat as mat, + asmatrix as asmatrix, +) + +__all__: list[str] +test: PytestTester diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..366e3f142acde81bf9fcc0ce5228823e3e31755b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ae081f034369c32c253e898da1e5d1e7a051a3d Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/defmatrix.py b/phivenv/Lib/site-packages/numpy/matrixlib/defmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..ed277e27164c749c5ca8c3ae1bf37eb307d0854d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1114 @@ +__all__ = ['matrix', 'bmat', 'asmatrix'] + +import sys +import warnings +import ast + +from .._utils import set_module +import numpy._core.numeric as N +from numpy._core.numeric import concatenate, isscalar +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. 
+from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + count = 0 + for row in rows: + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + count += 1 + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. + + Examples + -------- + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + Returns a matrix from an array-like object, or from a string of data. + + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. The class may be removed + in the future. + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> a = np.matrix('1 2; 3 4') + >>> a + matrix([[1, 2], + [3, 4]]) + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). 
' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: return new.copy() + else: return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + copy = None if not copy else True + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple([x for x in self.shape if x > 1]) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)) : + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__') : + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis==0: + return self + elif axis==1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. 
+ + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the axes of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. + + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. 
+ + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 # may vary + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary + >>> x.var(1) + matrix([[1.25], + [1.25], + [1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. 
+ + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. + + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. 
+ + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ptp(self, axis, out)._align(axis) + + @property + def I(self): + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], # may vary + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.linalg import inv as func + else: + from numpy.linalg import pinv as func + return asmatrix(func(self)) + + @property + def A(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + @property + def A1(self): + """ + Return `self` as a flattened `ndarray`. + + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, ..., 9, 10, 11]) + + + """ + return self.__array__().ravel() + + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 
'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + @property + def T(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + @property + def H(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is None. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. 
+ + Examples + -------- + >>> A = np.asmatrix('1 1; 1 1') + >>> B = np.asmatrix('2 2; 2 2') + >>> C = np.asmatrix('3 4; 5 6') + >>> D = np.asmatrix('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/defmatrix.pyi b/phivenv/Lib/site-packages/numpy/matrixlib/defmatrix.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4521f08eb27074d8c4cebcc7f42bcbf6fdaef778 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/defmatrix.pyi @@ -0,0 +1,16 @@ +from collections.abc import Sequence, Mapping +from typing import Any +from numpy import matrix as matrix +from numpy._typing import ArrayLike, DTypeLike, NDArray + +__all__: list[str] + +def bmat( + obj: str | Sequence[ArrayLike] | NDArray[Any], + ldict: None | Mapping[str, Any] = ..., + gdict: None | Mapping[str, Any] = ..., +) -> matrix[Any, Any]: ... + +def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ... 
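+# Editor's note (annotation, not part of the upstream stub): `mat` below is a +# legacy alias of `asmatrix`; e.g. ``mat('1 2; 3 4')`` builds the same 2x2 +# matrix as ``asmatrix('1 2; 3 4')``.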
+ +mat = asmatrix diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__init__.py b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5c8a5b6957614c069a76648f5562af12d34fa7b Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..befe04982b28e8e8ca34dc54cb5a3fbaa4cecb46 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58bd5d56b62ab70d2423f49c3c8d26a5ee243a33 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aae48aaf1c01ff5e1e8108d6064dafc948919878 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..922b25f90b3a4dc65c8f1ff1513c39a6ad2cdca3 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..470c433da4985196f81a7b287d79ffd2951b90c4 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93114ae8bb7e3c7de6f35a1068fc4929f794243c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b055437984b7e4bb59608d5882972825031022d8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..026f9959191c8658025497893849272dcf85b783 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,453 @@ +import collections.abc + +import numpy as np +from numpy import matrix, asmatrix, bmat +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_raises + ) +from numpy.linalg import matrix_power + +class TestCtor: + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E = np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties: + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. 
+ """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + mx = x.view(np.matrix) + assert_(mx.ptp() == 3) + assert_(np.all(mx.ptp(0) == np.array([2, 2]))) + assert_(np.all(mx.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j*A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A+0.1)) + assert_(np.all(mB == matrix(A+0.1))) + assert_(not np.any(mB == matrix(A-0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB > 0))) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = matrix('True; 
True; False') + B = matrix([[True], [True], [False]]) + assert_array_equal(A, B) + +class TestCasting: + def test_basic(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = np.ones((10, 10), np.float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == np.float64) + assert_(np.all(mA != mB)) + assert_(np.all(mB == mA+0.1)) + + mC = mA.copy() + O = np.ones((10, 10), np.complex128) + mC = mC * O + assert_(mC.dtype.type == np.complex128) + assert_(np.all(mA != mB)) + + +class TestAlgebra: + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], [3., 4.]]) + mA = matrix(A) + + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** i).A, B)) + B = np.dot(B, A) + + Ainv = linalg.inv(A) + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** -i).A, B)) + B = np.dot(B, Ainv) + + assert_(np.allclose((mA * mA).A, np.dot(A, A))) + assert_(np.allclose((mA + mA).A, (A + A))) + assert_(np.allclose((3*mA).A, (3*A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(np.allclose(mA2.A, 3*A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3. 4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + with assert_raises(TypeError): + 1.0**A + + # __mul__ with something not a list, ndarray, tuple, or scalar + with assert_raises(TypeError): + A*object() + + +class TestMatrixReturn: + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', 'newbyteorder', 'to_device', + 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'itemset', 'bitwise_count', + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections.abc.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + if attrib in methodargs: + args = methodargs[attrib] + else: + args = () + b = f(*args) + assert_(type(b) is matrix, "%s" % attrib) + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = matrix([0.0]).nonzero() + assert_(type(c) is np.ndarray) + assert_(type(d) is np.ndarray) + + +class TestIndexing: + def test_basic(self): + x = asmatrix(np.zeros((3, 2), float)) + y = np.zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y > 0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing: + a = matrix([[1, 2], [3, 4]]) + + def test_dimensions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = 
np.array([a, a]) + assert_equal(x.shape, [2, 2, 2]) + + def test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:], [[1, 0]]) + assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + + +class TestPower: + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = asmatrix(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape: + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, 
self.m.flatten())) + + def test_expand_dims_matrix(self): + # matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_interaction.py b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 0000000000000000000000000000000000000000..fa6067b0e362b13433749278366ae17e9249efaf --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,354 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +import pytest + +import textwrap +import warnings + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_raises, + assert_raises_regex, assert_array_equal, + assert_almost_equal, assert_array_almost_equal) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. 
+ dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(actual) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(actual) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from core.tests.test_nditer, given the + # matrix specific shape test.
+ + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def test_like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + assert_(res != np.nan) + assert_(len(w) == 0) + + +def 
test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3*3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0/3]]) + + +def test_dot_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = np.linspace(-5, 0) + mx = np.matrix(x) + my = np.matrix(y) + r = np.dot(x, y) + mr = np.dot(mx, my.T) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) + assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix: + # 2018-04-29: moved here from core.tests.test_index_tricks. + def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. 
+ with pytest.raises(AssertionError) as exc_info: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + msg = str(exc_info.value) + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + ACTUAL: array([1, 2]) + DESIRED: matrix([[1, 2]])""") + assert_equal(msg, msg_reference) + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..753c3011cf6bd7f557b110554ced4fe823ae8eea --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,232 @@ +import pickle + +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import (assert_, assert_equal, assert_raises, + assert_array_equal) +from numpy.ma.core import (masked_array, masked_values, masked, allequal, + MaskType, getmask, MaskedArray, nomask, + log, add, hypot, divide) +from numpy.ma.extras import mr_ + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + return + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +class TestMaskedMatrix: + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), 
np.matrix([True, True, False])) + assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), np.matrix([True, True, False])) + assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) + + def test_compressed(self): + a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + assert_(isinstance(b, np.matrix)) + a[0, 0] = masked + b = a.compressed() + assert_equal(b, [[2, 3, 4]]) + + def test_ravel(self): + a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel.shape, (1, 5)) + assert_equal(aravel._mask.shape, a.shape) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = masked_array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + assert_(isinstance(test, np.matrix)) + assert_(not isinstance(test, MaskedArray)) + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup_method(self): + x = np.arange(5, dtype='float') + mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, np.matrix)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), MMatrix)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a MMatrix + assert_(isinstance(add(mx, mx), MMatrix)) + assert_(isinstance(add(mx, x), MMatrix)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, np.matrix)) + with assert_warns(DeprecationWarning): + assert_(isinstance(add.outer(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, x), MMatrix)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), MMatrix)) + assert_(isinstance(divide(mx, x), MMatrix)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_matrix_builder(self): + assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) + + def test_matrix(self): + # Test consistency with unmasked version. If we ever deprecate + # matrix, this test should either still pass, or both actual and + # expected should fail to be built.
+ actual = mr_['r', 1, 2, 3] + expected = np.ma.array(np.r_['r', 1, 2, 3]) + assert_array_equal(actual, expected) + + # outer type is masked array, inner type is matrix + assert_equal(type(actual), type(expected)) + assert_equal(type(actual.data), type(expected.data)) diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..abfa0183f24778c4cec37674b1210c148f48c04b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,93 @@ +""" Test functions for linalg module using the matrix class.""" +import numpy as np + +from numpy.linalg.tests.test_linalg import ( + LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, + _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, + SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, + PinvCases, DetCases, LstsqCases) + + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. + + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..71198be5e56370462d5a4d3bb4b5a86b7202a052 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,16 @@ +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_equal + +class TestView: + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='<i2', type=np.matrix) + assert_array_equal(y, [[513]]) + + assert_(isinstance(y, np.matrix)) + assert_equal(y.dtype, np.dtype('<i2')) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__init__.py b/phivenv/Lib/site-packages/numpy/polynomial/__init__.py new file mode 100644 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/__init__.py + >>> from numpy.polynomial import Chebyshev + >>> c = Chebyshev.fit(xdata, ydata, deg=1) + +is preferred over the `chebyshev.chebfit` function from the + ``np.polynomial.chebyshev`` module:: + + >>> from
numpy.polynomial.chebyshev import chebfit + >>> c = chebfit(xdata, ydata, deg=1) + +See :doc:`routines.polynomials.classes` for more details. + +Convenience Classes +=================== + +The following lists the various constants and methods common to all of +the classes representing the various kinds of polynomials. In the following, +the term ``Poly`` represents any one of the convenience classes (e.g. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. + +Constants +--------- + +- ``Poly.domain`` -- Default domain +- ``Poly.window`` -- Default window +- ``Poly.basis_name`` -- String used to represent the basis +- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed +- ``Poly.nickname`` -- String used in printing + +Creation +-------- + +Methods for creating polynomial instances. + +- ``Poly.basis(degree)`` -- Basis polynomial of given degree +- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients + determined by the least-squares fit to the data ``x``, ``y`` +- ``Poly.fromroots(roots)`` -- ``p`` with specified roots +- ``p.copy()`` -- Create a copy of ``p`` + +Conversion +---------- + +Methods for converting a polynomial instance of one kind to another. + +- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` +- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map + between ``domain`` and ``window`` + +Calculus +-------- +- ``p.deriv()`` -- Take the derivative of ``p`` +- ``p.integ()`` -- Integrate ``p`` + +Validation +---------- +- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match +- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match +- ``Poly.has_sametype(p1, p2)`` -- Check if types match +- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match + +Misc +---- +- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain`` +- ``p.mapparms()`` -- Return the parameters for the linear mapping between + ``domain`` and ``window``. +- ``p.roots()`` -- Return the roots of ``p``. +- ``p.trim()`` -- Remove trailing coefficients. +- ``p.cutdeg(degree)`` -- Truncate ``p`` to given degree +- ``p.truncate(size)`` -- Truncate ``p`` to given size + +""" +from .polynomial import Polynomial +from .chebyshev import Chebyshev +from .legendre import Legendre +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre + +__all__ = [ + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + + +def set_default_printstyle(style): + """ + Set the default format for the string representation of polynomials. + + Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii' + or 'unicode'. + + Parameters + ---------- + style : str + Format string for default printing style. Must be either 'ascii' or + 'unicode'. + + Notes + ----- + The default format depends on the platform: 'unicode' is used on + Unix-based systems and 'ascii' on Windows. This determination is based on + default font support for the unicode superscript and subscript ranges. 
+ + Examples + -------- + >>> p = np.polynomial.Polynomial([1, 2, 3]) + >>> c = np.polynomial.Chebyshev([1, 2, 3]) + >>> np.polynomial.set_default_printstyle('unicode') + >>> print(p) + 1.0 + 2.0·x + 3.0·x² + >>> print(c) + 1.0 + 2.0·T₁(x) + 3.0·T₂(x) + >>> np.polynomial.set_default_printstyle('ascii') + >>> print(p) + 1.0 + 2.0 x + 3.0 x**2 + >>> print(c) + 1.0 + 2.0 T_1(x) + 3.0 T_2(x) + >>> # Formatting supersedes all class/package-level defaults + >>> print(f"{p:unicode}") + 1.0 + 2.0·x + 3.0·x² + """ + if style not in ('unicode', 'ascii'): + raise ValueError( + f"Unsupported format string '{style}'. Valid options are 'ascii' " + f"and 'unicode'" + ) + _use_unicode = True + if style == 'ascii': + _use_unicode = False + from ._polybase import ABCPolyBase + ABCPolyBase._use_unicode = _use_unicode + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__init__.pyi b/phivenv/Lib/site-packages/numpy/polynomial/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c96a31334b04132b296b394a55830aed127dc1d0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/__init__.pyi @@ -0,0 +1,21 @@ +from numpy._pytesttester import PytestTester + +from numpy.polynomial import ( + chebyshev as chebyshev, + hermite as hermite, + hermite_e as hermite_e, + laguerre as laguerre, + legendre as legendre, + polynomial as polynomial, +) +from numpy.polynomial.chebyshev import Chebyshev as Chebyshev +from numpy.polynomial.hermite import Hermite as Hermite +from numpy.polynomial.hermite_e import HermiteE as HermiteE +from numpy.polynomial.laguerre import Laguerre as Laguerre +from numpy.polynomial.legendre import Legendre as Legendre +from numpy.polynomial.polynomial import Polynomial as Polynomial + +__all__: list[str] +test: PytestTester + +def set_default_printstyle(style): ... 
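As a compact illustration of the convenience-class API documented in the package docstring above, the following sketch fits a line and exercises a few of the listed methods; the sample data are made up for illustration, and only NumPy itself is assumed:

    import numpy as np
    from numpy.polynomial import Chebyshev, Polynomial

    # made-up linear sample data
    xdata = np.linspace(0, 4, 9)
    ydata = 1.0 + 2.0 * xdata

    c = Chebyshev.fit(xdata, ydata, deg=1)  # least-squares fit (Creation)
    off, scl = c.mapparms()                 # domain -> window map parameters (Misc)
    p = c.convert(kind=Polynomial)          # change of basis (Conversion)
    print(c.domain)                         # [0. 4.], taken from the data
    print(p.coef)                           # approximately [1. 2.]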
diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24c478780ffe60bbb57cbddc221d0d70c8fdd30a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/hermite.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/hermite.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8ffb8937042edec30903bb5baf255ff6f260bda Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/hermite.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a49b4431a968003d248bad2e859a9205036ca09e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72ff5965b7ac295472a7eef440e7f96cef1efa9a Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/legendre.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/legendre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bf0991455e3af9731d57933c26a6371aa101c11 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/__pycache__/legendre.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/_polybase.py b/phivenv/Lib/site-packages/numpy/polynomial/_polybase.py new file mode 100644 index 0000000000000000000000000000000000000000..667adaf4ff0bb1b121a9cc0f7e7d334555e3b17f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/_polybase.py @@ -0,0 +1,1220 @@ +""" +Abstract base class for the various polynomial Classes. + +The ABCPolyBase class provides the methods needed to implement the common API +for the various polynomial classes. It operates as a mixin, but uses the +abc module from the stdlib, hence it is only available for Python >= 2.6. + +""" +import os +import abc +import numbers +from typing import Callable + +import numpy as np +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(abc.ABC): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. 
+ window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + symbol : str + Symbol representing the independent variable. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + # Unicode character mappings for improved __str__ + _superscript_mapping = str.maketrans({ + "0": "⁰", + "1": "¹", + "2": "²", + "3": "³", + "4": "⁴", + "5": "⁵", + "6": "⁶", + "7": "⁷", + "8": "⁸", + "9": "⁹" + }) + _subscript_mapping = str.maketrans({ + "0": "₀", + "1": "₁", + "2": "₂", + "3": "₃", + "4": "₄", + "5": "₅", + "6": "₆", + "7": "₇", + "8": "₈", + "9": "₉" + }) + # Some fonts don't support full unicode character ranges necessary for + # the full set of superscripts and subscripts, including common/default + # fonts in Windows shells/terminals. Therefore, default to ascii-only + # printing on windows. + _use_unicode = not os.name == 'nt' + + @property + def symbol(self): + return self._symbol + + @property + @abc.abstractmethod + def domain(self): + pass + + @property + @abc.abstractmethod + def window(self): + pass + + @property + @abc.abstractmethod + def basis_name(self): + pass + + @staticmethod + @abc.abstractmethod + def _add(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _div(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): + pass + + @staticmethod + @abc.abstractmethod + def _val(x, c): + pass + + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): + pass + + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): + pass + + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): + pass + + @staticmethod + @abc.abstractmethod + def _line(off, scl): + pass + + @staticmethod + @abc.abstractmethod + def _roots(c): + pass + + @staticmethod + @abc.abstractmethod + def _fromroots(r): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + if len(self.coef) != len(other.coef): + return False + elif not np.all(self.coef == other.coef): + return False + else: + return True + + def has_samedomain(self, other): + """Check if domains match. + + .. 
versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is the same class as self. + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of `other` if it is a compatible instance + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase. + + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + elif self.symbol != other.symbol: + raise ValueError("Polynomial symbols differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None, symbol='x'): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + # Validation for symbol + try: + if not symbol.isidentifier(): + raise ValueError( + "Symbol string must be a valid Python identifier" + ) + # If a user passes in something other than a string, the above + # results in an AttributeError. Catch this and raise a more + # informative exception + except AttributeError: + raise TypeError("Symbol must be a non-empty string") + + self._symbol = symbol + + def __repr__(self): + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return (f"{name}({coef}, domain={domain}, window={window}, " + f"symbol='{self.symbol}')") + + def __format__(self, fmt_str): + if fmt_str == '': + return self.__str__() + if fmt_str not in ('ascii', 'unicode'): + raise ValueError( + f"Unsupported format string '{fmt_str}' passed to " + f"{self.__class__}.__format__.
Valid options are " + f"'ascii' and 'unicode'" + ) + if fmt_str == 'ascii': + return self._generate_string(self._str_term_ascii) + return self._generate_string(self._str_term_unicode) + + def __str__(self): + if self._use_unicode: + return self._generate_string(self._str_term_unicode) + return self._generate_string(self._str_term_ascii) + + def _generate_string(self, term_method): + """ + Generate the full string representation of the polynomial, using + ``term_method`` to generate each polynomial term. + """ + # Get configuration for line breaks + linewidth = np.get_printoptions().get('linewidth', 75) + if linewidth < 1: + linewidth = 1 + out = pu.format_float(self.coef[0]) + + off, scale = self.mapparms() + + scaled_symbol, needs_parens = self._format_term(pu.format_float, + off, scale) + if needs_parens: + scaled_symbol = '(' + scaled_symbol + ')' + + for i, coef in enumerate(self.coef[1:]): + out += " " + power = str(i + 1) + # Polynomial coefficient + # The coefficient array can be an object array with elements that + # will raise a TypeError with >= 0 (e.g. strings or Python + # complex). In this case, represent the coefficient as-is. + try: + if coef >= 0: + next_term = "+ " + pu.format_float(coef, parens=True) + else: + next_term = "- " + pu.format_float(-coef, parens=True) + except TypeError: + next_term = f"+ {coef}" + # Polynomial term + next_term += term_method(power, scaled_symbol) + # Length of the current line with next term added + line_len = len(out.split('\n')[-1]) + len(next_term) + # If not the last term in the polynomial, it will be two + # characters longer due to the +/- with the next term + if i < len(self.coef[1:]) - 1: + line_len += 2 + # Handle linebreaking + if line_len >= linewidth: + next_term = next_term.replace(" ", "\n", 1) + out += next_term + return out + + @classmethod + def _str_term_unicode(cls, i, arg_str): + """ + String representation of single polynomial term using unicode + characters for superscripts and subscripts. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_unicode(cls, i, arg_str)" + ) + return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}" + f"({arg_str})") + + @classmethod + def _str_term_ascii(cls, i, arg_str): + """ + String representation of a single polynomial term using ** and _ to + represent superscripts and subscripts, respectively. 
+ """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_ascii(cls, i, arg_str)" + ) + return f" {cls.basis_name}_{i}({arg_str})" + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})" + + @staticmethod + def _repr_latex_scalar(x, parens=False): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return r'\text{{{}}}'.format(pu.format_float(x, parens=parens)) + + def _format_term(self, scalar_format: Callable, off: float, scale: float): + """ Format a single term in the expansion """ + if off == 0 and scale == 1: + term = self.symbol + needs_parens = False + elif scale == 1: + term = f"{scalar_format(off)} + {self.symbol}" + needs_parens = True + elif off == 0: + term = f"{scalar_format(scale)}{self.symbol}" + needs_parens = True + else: + term = ( + f"{scalar_format(off)} + " + f"{scalar_format(scale)}{self.symbol}" + ) + needs_parens = True + return term, needs_parens + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + term, needs_parens = self._format_term(self._repr_latex_scalar, + off, scale) + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = f"{self._repr_latex_scalar(c)}" + elif not isinstance(c, numbers.Real): + coef_str = f" + ({self._repr_latex_scalar(c)})" + elif c >= 0: + coef_str = f" + {self._repr_latex_scalar(c, parens=True)}" + else: + coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}" + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = rf"{coef_str}\,{term_str}" + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients at all + body = '0' + + return rf"${self.symbol} \mapsto {body}$" + + + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + ret['symbol'] = self.symbol + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + arg = pu.mapdomain(arg, self.domain, self.window) + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. 
+ + def __neg__(self): + return self.__class__( + -self.coef, self.domain, self.window, self.symbol + ) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. + # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + raise TypeError( + f"unsupported types for true division: " + f"'{type(self)}', '{type(other)}'" + ) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window, self.symbol) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rdiv__(self, other): + # Python 2 division operator; behaves like __floordiv__. + return self.__rfloordiv__(other) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number.
+ return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef) and + (self.symbol == other.symbol)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window, self.symbol) + + def degree(self): + """The degree of the series. + + .. versionadded:: 1.5.0 + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + Examples + -------- + + Create a polynomial object for ``1 + 7*x + 4*x**2``: + + >>> poly = np.polynomial.Polynomial([1, 7, 4]) + >>> print(poly) + 1.0 + 7.0·x + 4.0·x² + >>> poly.degree() + 2 + + Note that this method does not check for non-zero coefficients. + You must trim the polynomial to remove any trailing zeroes: + + >>> poly = np.polynomial.Polynomial([1, 7, 0]) + >>> print(poly) + 1.0 + 7.0·x + 0.0·x² + >>> poly.degree() + 2 + >>> poly.trim().degree() + 1 + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + New instance of series with trimmed coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. 
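A short sketch of the trimming helpers documented above (illustrative values): `trim` drops trailing near-zero coefficients, while `cutdeg`/`truncate` drop high-order terms unconditionally:

import numpy as np
from numpy.polynomial import Polynomial

p = Polynomial([1.0, 7.0, 1e-12])
assert p.degree() == 2                   # trailing near-zero coefficient still counts
assert p.trim(tol=1e-9).degree() == 1    # dropped: trailing |coef| below tol
assert p.cutdeg(1) == p.truncate(2)      # cutdeg(d) is truncate(d + 1)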
+ + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. + + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window, self.symbol) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window, symbol=self.symbol)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl*lbnd + coef = self._int(self.coef, m, k, lbnd, 1./scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def deriv(self, m=1): + """Differentiate. 
+ + Return a series instance that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + Find the derivative of order `m`. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. + + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decreases the further outside the `domain` they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None, in which case the instance domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at each element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None, symbol='x'): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) + y-coordinates of the M sample points ``(x[i], y[i])``. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is ``len(x)*eps``, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value.
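A sketch of `roots` and `linspace` from above (illustrative): the roots are computed in the window and mapped back to the domain, so they agree with the series' own evaluation:

import numpy as np
from numpy.polynomial import Chebyshev

p = Chebyshev.fromroots([-0.5, 0.25])
assert np.allclose(np.sort(p.roots()), [-0.5, 0.25])
assert np.allclose(p(p.roots()), 0.0)
x, y = p.linspace(7)             # 7 sample points across the domain, e.g. for plotting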
When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have + the same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + .. versionadded:: 1.5.0 + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class domain + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain and window specified in the call. If the + coefficients for the unscaled and unshifted basis polynomials are + of interest, do ``new_series.convert().coef``. + + [resid, rank, sv, rcond] : list + These values are only returned if ``full == True`` + + - resid -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - sv -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return ( + cls(coef, domain=domain, window=window, symbol=symbol), status + ) + else: + coef = res + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def fromroots(cls, roots, domain=[], window=None, symbol='x'): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series with the specified roots. + + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl*roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def identity(cls, domain=None, window=None, symbol='x'): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. 
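A sketch of `fit` in use, with made-up data: fit noisy samples of a quadratic, then read off the power-basis coefficients via `convert`:

import numpy as np
from numpy.polynomial import Chebyshev, Polynomial

rng = np.random.default_rng(0)
x = np.linspace(-1, 1, 50)
y = 1 - 2 * x + 3 * x**2 + 0.01 * rng.standard_normal(50)
fit = Chebyshev.fit(x, y, deg=2)
# Coefficients in the unscaled power basis, roughly [1, -2, 3].
print(fit.convert(kind=Polynomial).coef)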
If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be of the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window, symbol) + + @classmethod + def basis(cls, deg, domain=None, window=None, symbol='x'): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be of the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0]*ideg + [1], domain, window, symbol) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be of the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated.
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/_polybase.pyi b/phivenv/Lib/site-packages/numpy/polynomial/_polybase.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ea84a9ca122786e6df25ca07df66a54609b9a4db --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/_polybase.pyi @@ -0,0 +1,71 @@ +import abc +from typing import Any, ClassVar + +__all__: list[str] + +class ABCPolyBase(abc.ABC): + __hash__: ClassVar[None] # type: ignore[assignment] + __array_ufunc__: ClassVar[None] + maxpower: ClassVar[int] + coef: Any + @property + def symbol(self) -> str: ... + @property + @abc.abstractmethod + def domain(self): ... + @property + @abc.abstractmethod + def window(self): ... + @property + @abc.abstractmethod + def basis_name(self): ... + def has_samecoef(self, other): ... + def has_samedomain(self, other): ... + def has_samewindow(self, other): ... + def has_sametype(self, other): ... + def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ... + def __format__(self, fmt_str): ... + def __call__(self, arg): ... + def __iter__(self): ... + def __len__(self): ... + def __neg__(self): ... + def __pos__(self): ... + def __add__(self, other): ... + def __sub__(self, other): ... + def __mul__(self, other): ... + def __truediv__(self, other): ... + def __floordiv__(self, other): ... + def __mod__(self, other): ... + def __divmod__(self, other): ... + def __pow__(self, other): ... + def __radd__(self, other): ... + def __rsub__(self, other): ... + def __rmul__(self, other): ... + def __rdiv__(self, other): ... + def __rtruediv__(self, other): ... + def __rfloordiv__(self, other): ... + def __rmod__(self, other): ... + def __rdivmod__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def copy(self): ... + def degree(self): ... + def cutdeg(self, deg): ... + def trim(self, tol=...): ... + def truncate(self, size): ... + def convert(self, domain=..., kind=..., window=...): ... + def mapparms(self): ... + def integ(self, m=..., k = ..., lbnd=...): ... + def deriv(self, m=...): ... + def roots(self): ... + def linspace(self, n=..., domain=...): ... + @classmethod + def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + @classmethod + def fromroots(cls, roots, domain = ..., window=...): ... + @classmethod + def identity(cls, domain=..., window=...): ... + @classmethod + def basis(cls, deg, domain=..., window=...): ... + @classmethod + def cast(cls, series, domain=..., window=...): ... diff --git a/phivenv/Lib/site-packages/numpy/polynomial/chebyshev.py b/phivenv/Lib/site-packages/numpy/polynomial/chebyshev.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d7cef132db4b0ba3db96775e658f33f24a8cef --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/chebyshev.py @@ -0,0 +1,2082 @@ +""" +==================================================== +Chebyshev Series (:mod:`numpy.polynomial.chebyshev`) +==================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. 
(General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- + +.. autosummary:: + :toctree: generated/ + + Chebyshev + + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + chebdomain + chebzero + chebone + chebx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + chebadd + chebsub + chebmulx + chebmul + chebdiv + chebpow + chebval + chebval2d + chebval3d + chebgrid2d + chebgrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + chebder + chebint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + chebfromroots + chebroots + chebvander + chebvander2d + chebvander3d + chebgauss + chebweight + chebcompanion + chebfit + chebpts1 + chebpts2 + chebtrim + chebline + cheb2poly + poly2cheb + chebinterpolate + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math:: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math:: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight', 'chebinterpolate'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Convert Chebyshev series to z-series. + + Convert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2*n-1, dtype=c.dtype) + zs[n-1:] = c/2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Convert z-series to a Chebyshev series. + + Convert a z series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. 
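A small numeric sketch of the z-series layout described above. The identity T_n(x) = (z**n + z**-n)/2 turns a Chebyshev series into a symmetric Laurent ("z") series; the private helpers from this module are imported here purely for illustration:

import numpy as np
from numpy.polynomial.chebyshev import (
    _cseries_to_zseries, _zseries_to_cseries)  # private helpers, shown for illustration

c = np.array([4.0, 3.0, 2.0])                  # 4*T_0 + 3*T_1 + 2*T_2
zs = _cseries_to_zseries(c)
print(zs)                                      # [1.  1.5 4.  1.5 1. ], symmetric about the middle
assert np.allclose(_zseries_to_cseries(zs), c)  # exact round trip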
+ + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. + + """ + n = (zs.size + 1)//2 + c = zs[n-1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series. + + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. + + """ + z1 = z1.copy() + z2 = z2.copy() + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: + z1 /= z2 + return z1, z1[:1]*0 + elif lc1 < lc2: + return z1[:1]*0, z1 + else: + dlen = lc1 - lc2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r*z2 + z1[i:i+lc2] -= tmp + z1[j:j+lc2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r*z2 + z1[i:i+lc2] -= tmp + quo /= scl + rem = z1[i+1:i-1+lc2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n+1)*2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. 
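Because z-series multiplication is plain convolution, the classical product identity 2*T_m*T_n = T_{m+n} + T_{|m-n|} falls out directly; a quick check through the public API:

import numpy as np
from numpy.polynomial import chebyshev as C

# T_1 * T_2 = (T_1 + T_3)/2, reproduced by convolving the z-series.
assert np.allclose(C.chebmul([0, 1], [0, 0, 1]), [0, 0.5, 0, 0.5])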
+ + """ + n = 1 + len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n+1)*2 + zs[:n] /= div[:n] + zs[n+1:] /= div[n+1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., ... + >>> P.chebyshev.poly2cheb(range(4)) + array([1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. + + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.], ... + >>> P.chebyshev.cheb2poly(range(4)) + array([-2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1., 1.]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. 
+ + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the :math:`r_n` are the roots specified in + `roots`. If a zero has multiplicity n, then it must appear in `roots` + n times. For instance, if 2 is a root of multiplicity three and 3 is a + root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. + The roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([1.5+0.j, 0. +0.j, 0.5+0.j]) + + """ + return pu._fromroots(chebline, chebmul, roots) + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. 
The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + + .. versionadded:: 1.5.0 + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([1. , 2.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + if len(c) > 1: + tmp = c[1:]/2 + prd[2:] = tmp + prd[0:-2] += tmp + return prd + + +def chebmul(c1, c2): + """ + Multiply one Chebyshev series by another. + + Returns the product of two Chebyshev series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their product. + + See Also + -------- + chebadd, chebsub, chebmulx, chebdiv, chebpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Chebyshev polynomial basis set. Thus, to express + the product as a C-series, it is typically necessary to "reproject" + the product onto said basis set, which typically produces + "unintuitive" (but correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebmul(c1,c2) # multiplication requires "reprojection" + array([ 6.5, 12. , 12. , 4. , 1.5]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + prd = _zseries_mul(z1, z2) + ret = _zseries_to_cseries(prd) + return pu.trimseq(ret) + + +def chebdiv(c1, c2): + """ + Divide one Chebyshev series by another. + + Returns the quotient-with-remainder of two Chebyshev series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``T_0 + 2*T_1 + 3*T_2``.
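A quick consistency sketch for `chebdiv` (values from the docstring examples): as with integer divmod, quotient times divisor plus remainder recovers the dividend:

import numpy as np
from numpy.polynomial import chebyshev as C

c1, c2 = (1, 2, 3), (3, 2, 1)
quo, rem = C.chebdiv(c1, c2)
assert np.allclose(C.chebadd(C.chebmul(quo, c2), rem), c1)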
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Chebyshev series coefficients representing the quotient and + remainder. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebpow + + Notes + ----- + In general, the (polynomial) division of one C-series by another + results in quotient and remainder terms that are not in the Chebyshev + polynomial basis set. Thus, to express these results as C-series, it + is typically necessary to "reproject" the results onto said basis + set, which typically produces "unintuitive" (but correct) results; + see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + # note: this is more efficient than `pu._div(chebmul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Chebyshev series of power. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) + + """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). 
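A minimal check of `chebpow` against repeated multiplication (illustrative; both paths convolve the same z-series):

import numpy as np
from numpy.polynomial import chebyshev as C

c = [1, 2, 3, 4]
assert np.allclose(C.chebpow(c, 2), C.chebmul(c, c))   # squaring == self-multiplication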
The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. + + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([14., 12., 24.]) + >>> C.chebder(c,3) + array([96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j)*c[j] + c[j - 2] += (j*c[j])/(j - 2) + if n > 1: + der[1] = 4*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). 
The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/4 + for j in range(2, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. 
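A sketch of the relationship between `chebint` and `chebder` described above: they invert each other up to the integration constant, and `lbnd` fixes where the antiderivative vanishes:

import numpy as np
from numpy.polynomial import chebyshev as C

c = (1, 2, 3)
ci = C.chebint(c, lbnd=-1)        # antiderivative chosen to vanish at x = -1
assert np.allclose(C.chebder(ci), c)
assert np.isclose(C.chebval(-1, ci), 0.0)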
If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + chebval2d, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + x2 = 2*x + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + c0 = c[-i] - c1 + c1 = tmp + c1*x2 + return c0 + c1*x + + +def chebval2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than 2 the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points formed + from pairs of corresponding values from `x` and `y`. 
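A small numerical sketch that the Clenshaw recursion in `chebval` agrees with the defining formula T_n(x) = cos(n*arccos(x)) on [-1, 1]:

import numpy as np
from numpy.polynomial import chebyshev as C

x = np.linspace(-1, 1, 11)
c = np.array([1.0, 2.0, 3.0])
direct = sum(cn * np.cos(n * np.arccos(x)) for n, cn in enumerate(c))
assert np.allclose(C.chebval(x, c), direct)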
+ + See Also + -------- + chebval, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(chebval, c, x, y) + + +def chebgrid2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(chebval, c, x, y) + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + + Notes + ----- + + .. 
versionadded:: 1.7.0 + + """ + return pu._valnd(chebval, c, x, y, z) + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the Cartesian + product of `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(chebval, c, x, y, z) + + +def chebvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = T_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Chebyshev polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and + ``chebval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Chebyshev series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Chebyshev polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries.
+ v[0] = x*0 + 1 + if ideg > 0: + x2 = 2*x + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i-1]*x2 - v[i-2] + return np.moveaxis(v, 0, -1) + + +def chebvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Chebyshev polynomials. + + If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + chebvander, chebvander3d, chebval2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) + + +def chebvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Chebyshev polynomials. + + If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. 
+ + See Also + -------- + chebvander, chebvander2d, chebval2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) + + +def chebfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Chebyshev series to data. + + Return the coefficients of a Chebyshev series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer, + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is ``len(x)*eps``, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (deg + 1,) or (deg + 1, K) + Chebyshev coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. 
+ chebweight : Chebyshev weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo-Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.chebyshev import chebfit, chebval + >>> x = np.linspace(-1, 1, 51) + >>> y = chebval(x, [1, 2, 3]) + >>> chebfit(x, y, 2) + array([1., 2., 3.]) # may vary + + """ + return pu._fit(chebvander, x, y, deg, rcond, full, w) + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[0] = np.sqrt(.5) + top[1:] = 1/2 + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. 
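+ The roots are returned in sorted order, as the implementation sorts + the eigenvalues of the companion matrix before returning them. 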
+ + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix. Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = chebcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1]. The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be approximated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients they can simply be set to the desired + value after the interpolation; no new interpolation or fit is needed. This + is especially useful if it is known a priori that some of the coefficients + are zero. For instance, if the function is even then the coefficients of + the terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5*order + + return c + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. 
+ + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \\pi / n + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) + w = np.ones(ideg)*(np.pi/ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2) + return np.sin(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]`` sorted in ascending + order. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. + + The Chebyshev class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Chebyshev coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. 
+ The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(chebadd) + _sub = staticmethod(chebsub) + _mul = staticmethod(chebmul) + _div = staticmethod(chebdiv) + _pow = staticmethod(chebpow) + _val = staticmethod(chebval) + _int = staticmethod(chebint) + _der = staticmethod(chebder) + _fit = staticmethod(chebfit) + _line = staticmethod(chebline) + _roots = staticmethod(chebroots) + _fromroots = staticmethod(chebfromroots) + + @classmethod + def interpolate(cls, func, deg, domain=None, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the series that interpolates `func` at the Chebyshev points of + the first kind scaled and shifted to the `domain`. The resulting series + tends to a minmax approximation of `func` when the function is + continuous in the domain. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be interpolated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + domain : {None, [beg, end]}, optional + Domain over which `func` is interpolated. The default is None, in + which case the domain is [-1, 1]. + args : tuple, optional + Extra arguments to be used in the function call. Default is no + extra arguments. + + Returns + ------- + polynomial : Chebyshev instance + Interpolating Chebyshev instance. + + Notes + ----- + See `numpy.polynomial.chebinterpolate` for more details. + + """ + if domain is None: + domain = cls.domain + xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) + coef = chebinterpolate(xfunc, deg) + return cls(coef, domain=domain) + + # Virtual properties + domain = np.array(chebdomain) + window = np.array(chebdomain) + basis_name = 'T' diff --git a/phivenv/Lib/site-packages/numpy/polynomial/chebyshev.pyi b/phivenv/Lib/site-packages/numpy/polynomial/chebyshev.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0471f81cd66f3be1422a6efac14ee99ab4a7fed5 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/chebyshev.pyi @@ -0,0 +1,52 @@ +from typing import Any + +from numpy import int_ +from numpy.typing import NDArray +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +chebtrim = trimcoef + +def poly2cheb(pol): ... +def cheb2poly(c): ... + +chebdomain: NDArray[int_] +chebzero: NDArray[int_] +chebone: NDArray[int_] +chebx: NDArray[int_] + +def chebline(off, scl): ... +def chebfromroots(roots): ... +def chebadd(c1, c2): ... +def chebsub(c1, c2): ... +def chebmulx(c): ... +def chebmul(c1, c2): ... +def chebdiv(c1, c2): ... +def chebpow(c, pow, maxpower=...): ... +def chebder(c, m=..., scl=..., axis=...): ... +def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def chebval(x, c, tensor=...): ... +def chebval2d(x, y, c): ... +def chebgrid2d(x, y, c): ... +def chebval3d(x, y, z, c): ... +def chebgrid3d(x, y, z, c): ... +def chebvander(x, deg): ... +def chebvander2d(x, y, deg): ... 
+def chebvander3d(x, y, z, deg): ... +def chebfit(x, y, deg, rcond=..., full=..., w=...): ... +def chebcompanion(c): ... +def chebroots(c): ... +def chebinterpolate(func, deg, args = ...): ... +def chebgauss(deg): ... +def chebweight(x): ... +def chebpts1(npts): ... +def chebpts2(npts): ... + +class Chebyshev(ABCPolyBase): + @classmethod + def interpolate(cls, func, deg, domain=..., args = ...): ... + domain: Any + window: Any + basis_name: Any diff --git a/phivenv/Lib/site-packages/numpy/polynomial/hermite.py b/phivenv/Lib/site-packages/numpy/polynomial/hermite.py new file mode 100644 index 0000000000000000000000000000000000000000..c0509f510d14b30c0f5fa6270fdefac877b0576b --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/hermite.py @@ -0,0 +1,1788 @@ +""" +============================================================== +Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`) +============================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Hermite + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermdomain + hermzero + hermone + hermx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermadd + hermsub + hermmulx + hermmul + hermdiv + hermpow + hermval + hermval2d + hermval3d + hermgrid2d + hermgrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermder + hermint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermfromroots + hermroots + hermvander + hermvander2d + hermvander3d + hermgauss + hermweight + hermcompanion + hermfit + hermtrim + hermline + herm2poly + poly2herm + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([1. 
, 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(2*(i - 1))) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)*2) + + +# +# These are constant arrays of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermx = np.array([0, 1/2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0, hermline(3, 2)) + 3.0 + >>> hermval(1, hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl/2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. 
+ + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermline, hermmul, roots) + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([2. , 6.5, 1. 
, 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0]/2 + for i in range(1, len(c)): + prd[i + 1] = c[i]/2 + prd[i - 1] += c[i]*i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) + c1 = hermadd(tmp, hermmulx(c1)*2) + return hermadd(c0, hermmulx(c1)*2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(hermmul, c1, c2) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. 
+ That is, [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised. + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16. + + Returns + ------- + coef : ndarray + Hermite series of `c` raised to the power `pow`. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([81., 52., 82., 12., 9.]) + + """ + return pu._pow(hermmul, c, pow, maxpower) + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. , 0.5, 0.5, 0.5]) + array([1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2*j)*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. 
("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. + array([1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. 
, 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0]/2 + for j in range(1, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate an Hermite series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. 
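+ For Hermite series the backward recurrence pairs with the relation + ``H_{i+1}(x) = 2*x*H_i(x) - 2*i*H_{i-1}(x)``; this is the update + ``c0 = c[-i] - c1*(2*(nd - 1))``, ``c1 = tmp + c1*x2`` used in the + implementation below. 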
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + x2 = x*2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(2*(nd - 1)) + c1 = tmp + c1*x2 + return c0 + c1*x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array, a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval2d + >>> x = [1, 2] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermval2d(x, y, c) + array([1035., 2883.]) + + """ + return pu._valnd(hermval, c, x, y) + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. 
If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid2d + >>> x = [1, 2, 3] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermgrid2d(x, y, c) + array([[1035., 1599.], + [1867., 2883.], + [2699., 4167.]]) + + """ + return pu._gridnd(hermval, c, x, y) + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval3d + >>> x = [1, 2] + >>> y = [4, 5] + >>> z = [6, 7] + >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] + >>> hermval3d(x, y, z, c) + array([ 40077., 120131.]) + + """ + return pu._valnd(hermval, c, x, y, z) + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. 
In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional Hermite series at points in the + Cartesian product of `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid3d + >>> x = [1, 2] + >>> y = [4, 5] + >>> z = [6, 7] + >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] + >>> hermgrid3d(x, y, z, c) + array([[[ 40077., 54117.], + [ 49293., 66561.]], + [[ 72375., 97719.], + [ 88975., 120131.]]]) + + """ + return pu._gridnd(hermval, c, x, y, z) + + +def hermvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = H_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Hermite polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and + ``hermval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Hermite series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Hermite polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermvander + >>> x = np.array([-1, 0, 1]) + >>> hermvander(x, 3) + array([[ 1., -2., 2., 4.], + [ 1., 0., -2., -0.], + [ 1., 2., 2., -4.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + x2 = x*2 + v[1] = x2 + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) + return np.moveaxis(v, 0, -1) + + +def hermvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. 
+ + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Hermite polynomials. + + If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermvander2d + >>> x = np.array([-1, 0, 1]) + >>> y = np.array([-1, 0, 1]) + >>> hermvander2d(x, y, [2, 2]) + array([[ 1., -2., 2., -2., 4., -4., 2., -4., 4.], + [ 1., 0., -2., 0., 0., -0., -2., -0., 4.], + [ 1., 2., 2., 2., 4., 4., 2., 4., 4.]]) + + """ + return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg) + + +def hermvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Hermite polynomials. + + If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. 
The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermvander3d + >>> x = np.array([-1, 0, 1]) + >>> y = np.array([-1, 0, 1]) + >>> z = np.array([-1, 0, 1]) + >>> hermvander3d(x, y, z, [0, 1, 2]) + array([[ 1., -2., 2., -2., 4., -4.], + [ 1., 0., -2., 0., 0., -0.], + [ 1., 2., 2., 2., 4., 4.]]) + + """ + return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) + + +def hermfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a Hermite series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. 
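# --- Editorial aside (not part of the NumPy source): a hedged sketch of the
# inverse-variance weighting described for hermfit's `w` parameter above;
# the noise level `sigma` and the test coefficients are assumptions.
import numpy as np
from numpy.polynomial.hermite import hermfit, hermval

rng = np.random.default_rng(0)
x = np.linspace(-1.0, 1.0, 50)
sigma = 0.1                                   # assumed per-point noise level
y = hermval(x, [1.0, 2.0, 3.0]) + rng.normal(0.0, sigma, x.size)
coef = hermfit(x, y, 2, w=np.full(x.size, 1.0/sigma))   # w[i] = 1/sigma(y[i])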
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite_e.hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Hermite series are probably most useful when the data can be + approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Hermite + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfit, hermval + >>> x = np.linspace(-10, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = hermval(x, [1, 2, 3]) + err + >>> hermfit(x, y, 2) + array([1.0218, 1.9986, 2.9999]) # may vary + + """ + return pu._fit(hermvander, x, y, deg, rcond, full, w) + + +def hermcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Hermite basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermcompanion + >>> hermcompanion([1, 0, 1]) + array([[0. , 0.35355339], + [0.70710678, 0. 
]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-.5*c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(.5*np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) + return mat + + +def hermroots(c): + """ + Compute the roots of a Hermite series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * H_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Hermite series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([0. , 0.25 , 0. , 0.125]) + >>> hermroots(coef) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5*c[0]/c[1]]) + + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(2./nd) + nd = nd - 1.0 + return c0 + c1*x*np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. 
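# --- Editorial aside (not part of the NumPy source): a quick round-trip
# check for hermfromroots/hermroots as documented above; the sample roots
# are arbitrary.
import numpy as np
from numpy.polynomial.hermite import hermfromroots, hermroots

r = np.array([-2.0, 0.5, 1.0])
c = hermfromroots(r)
# hermroots returns eigenvalue-based estimates, sorted ascending.
assert np.allclose(hermroots(c), np.sort(r))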
+ These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgauss + >>> hermgauss(2) + (array([-0.70710678, 0.70710678]), array([0.88622693, 0.88622693])) + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the Hermite polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.hermite import hermweight + >>> x = np.arange(-2, 2) + >>> hermweight(x) + array([0.01831564, 0.36787944, 1. , 0.36787944]) + + """ + w = np.exp(-x**2) + return w + + +# +# Hermite series class +# + +class Hermite(ABCPolyBase): + """An Hermite series class. + + The Hermite class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Hermite coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. 
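# --- Editorial aside (not part of the NumPy source): a minimal check of the
# Gauss-Hermite rule described above against a closed form; the integral of
# x**4 * exp(-x**2) over the real line is 3*sqrt(pi)/4.
import numpy as np
from numpy.polynomial.hermite import hermgauss

nodes, weights = hermgauss(5)            # exact for polynomial degree <= 9
assert np.isclose(np.sum(weights * nodes**4), 3.0*np.sqrt(np.pi)/4.0)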
Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermadd) + _sub = staticmethod(hermsub) + _mul = staticmethod(hermmul) + _div = staticmethod(hermdiv) + _pow = staticmethod(hermpow) + _val = staticmethod(hermval) + _int = staticmethod(hermint) + _der = staticmethod(hermder) + _fit = staticmethod(hermfit) + _line = staticmethod(hermline) + _roots = staticmethod(hermroots) + _fromroots = staticmethod(hermfromroots) + + # Virtual properties + domain = np.array(hermdomain) + window = np.array(hermdomain) + basis_name = 'H' diff --git a/phivenv/Lib/site-packages/numpy/polynomial/hermite.pyi b/phivenv/Lib/site-packages/numpy/polynomial/hermite.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1e35d3dd1f2edba4e332b9d734f614d715af69f2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/hermite.pyi @@ -0,0 +1,47 @@ +from typing import Any + +from numpy import int_, float64 +from numpy.typing import NDArray +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermtrim = trimcoef + +def poly2herm(pol): ... +def herm2poly(c): ... + +hermdomain: NDArray[int_] +hermzero: NDArray[int_] +hermone: NDArray[int_] +hermx: NDArray[float64] + +def hermline(off, scl): ... +def hermfromroots(roots): ... +def hermadd(c1, c2): ... +def hermsub(c1, c2): ... +def hermmulx(c): ... +def hermmul(c1, c2): ... +def hermdiv(c1, c2): ... +def hermpow(c, pow, maxpower=...): ... +def hermder(c, m=..., scl=..., axis=...): ... +def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def hermval(x, c, tensor=...): ... +def hermval2d(x, y, c): ... +def hermgrid2d(x, y, c): ... +def hermval3d(x, y, z, c): ... +def hermgrid3d(x, y, z, c): ... +def hermvander(x, deg): ... +def hermvander2d(x, y, deg): ... +def hermvander3d(x, y, z, deg): ... +def hermfit(x, y, deg, rcond=..., full=..., w=...): ... +def hermcompanion(c): ... +def hermroots(c): ... +def hermgauss(deg): ... +def hermweight(x): ... + +class Hermite(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/phivenv/Lib/site-packages/numpy/polynomial/hermite_e.py b/phivenv/Lib/site-packages/numpy/polynomial/hermite_e.py new file mode 100644 index 0000000000000000000000000000000000000000..dd914c139cfeb311c1ee35202c858f66c90add33 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/hermite_e.py @@ -0,0 +1,1696 @@ +""" +=================================================================== +HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`) +=================================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + HermiteE + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermedomain + hermezero + hermeone + hermex + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermeadd + hermesub + hermemulx + hermemul + hermediv + hermepow + hermeval + hermeval2d + hermeval3d + hermegrid2d + hermegrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermeder + hermeint + +Misc Functions +-------------- +.. 
autosummary:: + :toctree: generated/ + + hermefromroots + hermeroots + hermevander + hermevander2d + hermevander3d + hermegauss + hermeweight + hermecompanion + hermefit + hermetrim + hermeline + herme2poly + poly2herme + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herme + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import herme2poly + >>> herme2poly([ 2., 10., 2., 3.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(i - 1)) + c1 = polyadd(tmp, polymulx(c1)) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermedomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. 
+hermezero = np.array([0]) + +# Hermite coefficients representing one. +hermeone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermex = np.array([0, 1]) + + +def hermeline(off, scl): + """ + Hermite series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeline + >>> from numpy.polynomial.hermite_e import hermeline, hermeval + >>> hermeval(0,hermeline(3, 2)) + 3.0 + >>> hermeval(1,hermeline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def hermefromroots(roots): + """ + Generate a HermiteE series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in HermiteE form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in HermiteE form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.chebyshev.chebfromroots + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval + >>> coef = hermefromroots((-1, 0, 1)) + >>> hermeval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermefromroots((-1j, 1j)) + >>> hermeval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermeline, hermemul, roots) + + +def hermeadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermesub, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." 
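# --- Editorial aside (not part of the NumPy source): a tiny sketch of the
# "component-wise" claim in the hermeadd Notes above; the sum should equal
# zero-padded elementwise addition of the coefficient arrays.
import numpy as np
from numpy.polynomial.hermite_e import hermeadd

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 3.0, 4.0])
assert np.allclose(hermeadd(a, b), np.pad(a, (0, 1)) + b)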
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeadd + >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermesub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermeadd, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermesub + >>> hermesub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermemulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemulx + >>> hermemulx([1, 2, 3]) + array([2., 7., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + prd[i + 1] = c[i] + prd[i - 1] += c[i]*i + return prd + + +def hermemul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermediv, hermepow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. 
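# --- Editorial aside (not part of the NumPy source): a short sketch of the
# "reprojection" point above; hermemul returns He-basis coefficients of the
# product, so values must match the pointwise product of the factors.
import numpy as np
from numpy.polynomial.hermite_e import hermemul, hermeval

c1, c2 = [1, 2, 3], [0, 1, 2]
prod = hermemul(c1, c2)
xs = np.linspace(-2.0, 2.0, 7)
assert np.allclose(hermeval(xs, prod), hermeval(xs, c1) * hermeval(xs, c2))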
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemul + >>> hermemul([1, 2, 3], [0, 1, 2]) + array([14., 15., 28., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermesub(c[-i]*xs, c1*(nd - 1)) + c1 = hermeadd(tmp, hermemulx(c1)) + return hermeadd(c0, hermemulx(c1)) + + +def hermediv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermepow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermediv + >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 2.])) + + """ + return pu._div(hermemul, c1, c2) + + +def hermepow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([23., 28., 46., 12., 9.]) + + """ + return pu._pow(hermemul, c, pow, maxpower) + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. 
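# --- Editorial aside (not part of the NumPy source): a hedged sketch of the
# quotient/remainder identity behind hermediv above, i.e. c1 == quo*c2 + rem
# in the He basis, checked by evaluation; the inputs mirror the docstring.
import numpy as np
from numpy.polynomial.hermite_e import hermediv, hermemul, hermeadd, hermeval

c1, c2 = [14.0, 15.0, 28.0, 7.0, 6.0], [0.0, 1.0, 2.0]
quo, rem = hermediv(c1, c2)
recon = hermeadd(hermemul(quo, c2), rem)
xs = np.linspace(-1.0, 1.0, 5)
assert np.allclose(hermeval(xs, recon), hermeval(xs, c1))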
If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. 
versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j]/(j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate an HermiteE series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. 
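# --- Editorial aside (not part of the NumPy source): a quick sketch, using
# the defaults documented above, that differentiation undoes integration:
# hermeder(hermeint(c)) recovers c.
import numpy as np
from numpy.polynomial.hermite_e import hermeint, hermeder

c = np.array([1.0, 2.0, 3.0])
assert np.allclose(hermeder(hermeint(c)), c)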
In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(nd - 1) + c1 = tmp + c1*x + return c0 + c1*x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. 
versionadded:: 1.7.0 + + """ + return pu._valnd(hermeval, c, x, y) + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(hermeval, c, x, y) + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + + Notes + ----- + + .. 
versionadded:: 1.7.0 + + """ + return pu._valnd(hermeval, c, x, y, z) + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the Cartesian + product of `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermeval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(hermeval, c, x, y, z) + + +def hermevander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = He_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the HermiteE polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and + ``hermeval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of HermiteE series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding HermiteE polynomial. The dtype will be the same as + the converted `x`.
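# --- Editorial aside (not part of the NumPy source): a small sketch of the
# grid-shape rule stated for hermegrid3d above; the result shape is
# x.shape + y.shape + z.shape, with the inputs here chosen arbitrarily.
import numpy as np
from numpy.polynomial.hermite_e import hermegrid3d

x, y, z = [1.0, 2.0], [4.0, 5.0, 6.0], [7.0]
c = np.ones((2, 2, 2))
assert hermegrid3d(x, y, z, c).shape == (2, 3, 1)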
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermevander + >>> x = np.array([-1, 0, 1]) + >>> hermevander(x, 3) + array([[ 1., -1., 0., 2.], + [ 1., 0., -1., -0.], + [ 1., 1., 0., -2.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x - v[i-2]*(i - 1)) + return np.moveaxis(v, 0, -1) + + +def hermevander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the HermiteE polynomials. + + If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D HermiteE + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + hermevander, hermevander3d, hermeval2d, hermeval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg) + + +def hermevander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the HermiteE polynomials. + + If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D HermiteE + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape.
The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + hermevander, hermevander3d, hermeval2d, hermeval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg) + + +def hermefit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a HermiteE series of degree `deg` that is + the least squares fit to the data values `y` given at points `x`. If + `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D + multiple fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full = False``. 
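# --- Editorial aside (not part of the NumPy source): a hedged sketch of the
# diagnostic return described for hermefit above; with full=True the SVD
# details accompany the coefficients. The test data are arbitrary.
import numpy as np
from numpy.polynomial.hermite_e import hermefit, hermeval

x = np.linspace(-1.0, 1.0, 20)
y = hermeval(x, [1.0, 2.0, 3.0])
coef, (resid, rank, sv, rcond) = hermefit(x, y, 2, full=True)
assert rank == 3                          # scaled Vandermonde has full rank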
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.laguerre.lagfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the HermiteE + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermeweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefit, hermeval + >>> x = np.linspace(-10, 10) + >>> np.random.seed(123) + >>> err = np.random.randn(len(x))/10 + >>> y = hermeval(x, [1, 2, 3]) + err + >>> hermefit(x, y, 2) + array([ 1.01690445, 1.99951418, 2.99948696]) # may vary + + """ + return pu._fit(hermevander, x, y, deg, rcond, full, w) + + +def hermecompanion(c): + """ + Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an HermiteE basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] 
= top + mat[:, -1] -= scl*c[:-1]/c[-1] + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.chebyshev.chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = hermecompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(2*np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(1./nd) + nd = nd - 1.0 + return c0 + c1*x + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. 
math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = hermecompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_e_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(2*np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\\exp(-x^2/2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-.5*x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. 
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/phivenv/Lib/site-packages/numpy/polynomial/hermite_e.pyi b/phivenv/Lib/site-packages/numpy/polynomial/hermite_e.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a8c0fe09dadb3f37e2816cfaed0129df670b0456 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,47 @@ +from typing import Any + +from numpy import int_ +from numpy.typing import NDArray +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermetrim = trimcoef + +def poly2herme(pol): ... +def herme2poly(c): ... + +hermedomain: NDArray[int_] +hermezero: NDArray[int_] +hermeone: NDArray[int_] +hermex: NDArray[int_] + +def hermeline(off, scl): ... +def hermefromroots(roots): ... +def hermeadd(c1, c2): ... +def hermesub(c1, c2): ... +def hermemulx(c): ... +def hermemul(c1, c2): ... +def hermediv(c1, c2): ... +def hermepow(c, pow, maxpower=...): ... +def hermeder(c, m=..., scl=..., axis=...): ... +def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def hermeval(x, c, tensor=...): ... +def hermeval2d(x, y, c): ... +def hermegrid2d(x, y, c): ... +def hermeval3d(x, y, z, c): ... +def hermegrid3d(x, y, z, c): ... +def hermevander(x, deg): ... +def hermevander2d(x, y, deg): ... +def hermevander3d(x, y, z, deg): ... +def hermefit(x, y, deg, rcond=..., full=..., w=...): ... +def hermecompanion(c): ... +def hermeroots(c): ... +def hermegauss(deg): ... +def hermeweight(x): ... + +class HermiteE(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/phivenv/Lib/site-packages/numpy/polynomial/laguerre.py b/phivenv/Lib/site-packages/numpy/polynomial/laguerre.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b9d939eef47a74f4aa04ba75b05a303b11e5db --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/laguerre.py @@ -0,0 +1,1720 @@ +""" +================================================== +Laguerre Series (:mod:`numpy.polynomial.laguerre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Laguerre + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + lagdomain + lagzero + lagone + lagx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + lagadd + lagsub + lagmulx + lagmul + lagdiv + lagpow + lagval + lagval2d + lagval3d + laggrid2d + laggrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + lagder + lagint + +Misc Functions +-------------- +.. 
autosummary:: + :toctree: generated/ + + lagfromroots + lagroots + lagvander + lagvander2d + lagvander3d + laggauss + lagweight + lagcompanion + lagfit + lagtrim + lagline + lag2poly + poly2lag + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + res = 0 + for p in pol[::-1]: + res = lagadd(lagmulx(res), p) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Laguerre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2lag + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lag2poly + >>> lag2poly([ 23., -63., 58., -18.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) + return polyadd(c0, polysub(c1, polymulx(c1))) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Laguerre +lagdomain = np.array([0., 1.]) + +# Laguerre coefficients representing zero. +lagzero = np.array([0]) + +# Laguerre coefficients representing one. +lagone = np.array([1]) + +# Laguerre coefficients representing the identity x. 
+lagx = np.array([1, -1]) + + +def lagline(off, scl): + """ + Laguerre series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Laguerre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagline, lagval + >>> lagval(0,lagline(3, 2)) + 3.0 + >>> lagval(1,lagline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off + scl, -scl]) + else: + return np.array([off]) + + +def lagfromroots(roots): + """ + Generate a Laguerre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Laguerre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Laguerre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(lagline, lagmul, roots) + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. 
+ + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([-1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i]*(i + 1) + prd[i] += c[i]*(2*i + 1) + prd[i - 1] -= c[i]*i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. 
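+ + As a quick numerical check of this reprojection (a sketch, assuming + only `lagmul` and `lagval` from this module), the product series + evaluated at a point matches the pointwise product of the two factor + series: + + >>> from numpy.polynomial.laguerre import lagmul, lagval + >>> prod = lagmul([1, 2, 3], [0, 1, 2]) + >>> bool(np.isclose(lagval(0.5, prod), + ... lagval(0.5, [1, 2, 3])*lagval(0.5, [0, 1, 2]))) + True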
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(lagmul, c1, c2) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. + + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + return pu._pow(lagmul, c, pow, maxpower) + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. 
If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. 
versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. 
+ c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1, 2, 3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1, 2],[3, 4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. ]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*((2*nd - 1) - x))/nd + return c0 + c1*(1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + + .. 
versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval2d + >>> c = [[1, 2],[3, 4]] + >>> lagval2d(1, 1, c) + 1.0 + """ + return pu._valnd(lagval, c, x, y) + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Laguerre series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import laggrid2d + >>> c = [[1, 2], [3, 4]] + >>> laggrid2d([0, 1], [0, 1], c) + array([[10., 4.], + [ 3., 1.]]) + + """ + return pu._gridnd(lagval, c, x, y) + + +def lagval3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients.
+ + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + lagval, lagval2d, laggrid2d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval3d + >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> lagval3d(1, 1, 2, c) + -1.0 + + """ + return pu._valnd(lagval, c, x, y, z) + + +def laggrid3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the + Cartesian product of `x`, `y`, and `z`. + + See Also + -------- + lagval, lagval2d, laggrid2d, lagval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import laggrid3d + >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> laggrid3d([0, 1], [0, 1], [2, 4], c) + array([[[ -4., -44.], + [ -2., -18.]], + [[ -2., -14.], + [ -1., -5.]]]) + + """ + return pu._gridnd(lagval, c, x, y, z) + + +def lagvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Laguerre polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and + ``lagval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Laguerre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array.
+ deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Laguerre polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagvander + >>> x = np.array([0, 1, 2]) + >>> lagvander(x, 3) + array([[ 1. , 1. , 1. , 1. ], + [ 1. , 0. , -0.5 , -0.66666667], + [ 1. , -1. , -1. , -0.33333333]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = 1 - x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i + return np.moveaxis(v, 0, -1) + + +def lagvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Laguerre polynomials. + + If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Laguerre + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + lagvander, lagvander3d, lagval2d, lagval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagvander2d + >>> x = np.array([0]) + >>> y = np.array([2]) + >>> lagvander2d(x, y, [2, 1]) + array([[ 1., -1., 1., -1., 1., -1.]]) + + """ + return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg) + + +def lagvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Laguerre polynomials. 
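+ + For example, the number of columns is the product of the individual + orders (a minimal shape check; a sketch using only `lagvander3d` with + arbitrary sample points): + + >>> lagvander3d(np.array([0.]), np.array([0.]), np.array([0.]), [1, 2, 3]).shape + (1, 24)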
+ + If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Laguerre + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + lagvander, lagvander3d, lagval2d, lagval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagvander3d + >>> x = np.array([0]) + >>> y = np.array([2]) + >>> z = np.array([0]) + >>> lagvander3d(x, y, z, [2, 1, 3]) + array([[ 1., 1., 1., 1., -1., -1., -1., -1., 1., 1., 1., 1., -1., + -1., -1., -1., 1., 1., 1., 1., -1., -1., -1., -1.]]) + + """ + return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg) + + +def lagfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Laguerre series to data. + + Return the coefficients of a Laguerre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where ``n`` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. 
Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Laguerre coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column *k* of `y` are in column + *k*. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Laguerre series ``p`` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where ``V`` is the weighted pseudo Vandermonde matrix of `x`, ``c`` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of ``V``. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `lagweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfit, lagval + >>> x = np.linspace(0, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = lagval(x, [1, 2, 3]) + err + >>> lagfit(x, y, 2) + array([ 0.96971004, 2.00193749, 3.00288744]) # may vary + + """ + return pu._fit(lagvander, x, y, deg, rcond, full, w) + + +def lagcompanion(c): + """ + Return the companion matrix of c. 
+ + The usual companion matrix of the Laguerre polynomials is already + symmetric when `c` is a basis Laguerre polynomial, so no scaling is + applied. + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagcompanion + >>> lagcompanion([1, 2, 3]) + array([[ 1. , -0.33333333], + [-1. , 4.33333333]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[1 + c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + top = mat.reshape(-1)[1::n+1] + mid = mat.reshape(-1)[0::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = -np.arange(1, n) + mid[...] = 2.*np.arange(n) + 1. + bot[...] = top + mat[:, -1] += (c[:-1]/c[-1])*n + return mat + + +def lagroots(c): + """ + Compute the roots of a Laguerre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Laguerre series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagroots, lagfromroots + >>> coef = lagfromroots([0, 1, 2]) + >>> coef + array([ 2., -8., 12., -6.]) + >>> lagroots(coef) + array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([1 + c[0]/c[1]]) + + # rotated companion matrix reduces error + m = lagcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def laggauss(deg): + """ + Gauss-Laguerre quadrature. + + Computes the sample points and weights for Gauss-Laguerre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]` + with the weight function :math:`f(x) = \\exp(-x)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100 higher degrees may + be problematic. 
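As a sanity check (a sketch using only `laggauss`), a low-order rule + reproduces :math:`\\int_0^\\infty x^2 \\exp(-x)\\,dx = 2` up to + roundoff: + + >>> x, w = laggauss(3) + >>> float(np.round((w * x**2).sum(), 10)) + 2.0 + +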
The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + Examples + -------- + >>> from numpy.polynomial.laguerre import laggauss + >>> laggauss(2) + (array([0.58578644, 3.41421356]), array([0.85355339, 0.14644661])) + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = lagcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = lagval(x, c) + df = lagval(x, lagder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = lagval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # scale w to get the right value, 1 in this case + w /= w.sum() + + return x, w + + +def lagweight(x): + """Weight function of the Laguerre polynomials. + + The weight function is :math:`exp(-x)` and the interval of integration + is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagweight + >>> x = np.array([0, 1, 2]) + >>> lagweight(x) + array([1. , 0.36787944, 0.13533528]) + + """ + w = np.exp(-x) + return w + +# +# Laguerre series class +# + +class Laguerre(ABCPolyBase): + """A Laguerre series class. + + The Laguerre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Laguerre coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [0, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [0, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. 
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + domain = np.array(lagdomain) + window = np.array(lagdomain) + basis_name = 'L' diff --git a/phivenv/Lib/site-packages/numpy/polynomial/laguerre.pyi b/phivenv/Lib/site-packages/numpy/polynomial/laguerre.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1155442e2a52f040513f0398bb5dd818aebc4f3a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/laguerre.pyi @@ -0,0 +1,47 @@ +from typing import Any + +from numpy import int_ +from numpy.typing import NDArray +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +lagtrim = trimcoef + +def poly2lag(pol): ... +def lag2poly(c): ... + +lagdomain: NDArray[int_] +lagzero: NDArray[int_] +lagone: NDArray[int_] +lagx: NDArray[int_] + +def lagline(off, scl): ... +def lagfromroots(roots): ... +def lagadd(c1, c2): ... +def lagsub(c1, c2): ... +def lagmulx(c): ... +def lagmul(c1, c2): ... +def lagdiv(c1, c2): ... +def lagpow(c, pow, maxpower=...): ... +def lagder(c, m=..., scl=..., axis=...): ... +def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def lagval(x, c, tensor=...): ... +def lagval2d(x, y, c): ... +def laggrid2d(x, y, c): ... +def lagval3d(x, y, z, c): ... +def laggrid3d(x, y, z, c): ... +def lagvander(x, deg): ... +def lagvander2d(x, y, deg): ... +def lagvander3d(x, y, z, deg): ... +def lagfit(x, y, deg, rcond=..., full=..., w=...): ... +def lagcompanion(c): ... +def lagroots(c): ... +def laggauss(deg): ... +def lagweight(x): ... + +class Laguerre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/phivenv/Lib/site-packages/numpy/polynomial/legendre.py b/phivenv/Lib/site-packages/numpy/polynomial/legendre.py new file mode 100644 index 0000000000000000000000000000000000000000..b1eda69fc911777ab422f837ec5ab98f071e4de4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/legendre.py @@ -0,0 +1,1665 @@ +""" +================================================== +Legendre Series (:mod:`numpy.polynomial.legendre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Legendre + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain + legzero + legone + legx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legadd + legsub + legmulx + legmul + legdiv + legpow + legval + legval2d + legval3d + leggrid2d + leggrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder + legint + +Misc Functions +-------------- + +.. 
autosummary:: + :toctree: generated/ + + legfromroots + legroots + legvander + legvander2d + legvander3d + leggauss + legweight + legcompanion + legfit + legtrim + legline + leg2poly + poly2leg + +See also +-------- +numpy.polynomial + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ... + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. + + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Legendre(range(4)) + >>> c + Legendre([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., ... + >>> P.legendre.leg2poly(range(4)) + array([-1. , -3.5, 3. 
, 7.5]) + + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1., 1.]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary + + """ + return pu._fromroots(legline, legmul, roots) + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
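A short sketch tying `legfromroots` above to evaluation with `legval`; the root set is the docstring's own example:

    import numpy as np
    from numpy.polynomial import legendre as L

    coef = L.legfromroots([-1, 0, 1])       # x**3 - x in the Legendre basis
    print(coef)                             # array([ 0. , -0.4,  0. ,  0.4])
    print(np.allclose(L.legval([-1, 0, 1], coef), 0))   # True: vanishes at its roots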
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i]*j)/s + prd[k] += (c[i]*i)/s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. 
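A quick check of the `legmulx` recursion above for ``i = 1``: the identity gives ``x*P_1 = (2*P_2 + P_0)/3``, so the expected coefficients are ``[1/3, 0, 2/3]`` (an illustrative hand-checked case):

    import numpy as np
    from numpy.polynomial import legendre as L

    # Multiply the bare P_1 series by x.
    print(L.legmulx([0, 1]))    # array([0.33333333, 0.        , 0.66666667])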
+ + See Also + -------- + legadd, legsub, legmulx, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmulx, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary + + """ + return pu._div(legmul, c1, c2) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + """ + return pu._pow(legmul, c, pow, maxpower) + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. 
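A round-trip sketch for `legdiv` above, reusing its docstring's example inputs:

    import numpy as np
    from numpy.polynomial import legendre as L

    c1, c2 = (1, 2, 3), (3, 2, 1)
    quo, rem = L.legdiv(c1, c2)               # (array([3.]), array([-8., -4.]))
    # quo*c2 + rem recovers c1 up to roundoff:
    print(L.legadd(L.legmul(quo, c2), rem))   # array([1., 2., 3.])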
At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j - 1)*c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. 
(Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/3 + for j in range(2, n): + t = c[j]/(2*j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. 
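Stepping back to `legint` above, a minimal sketch of the derivative/integral round trip; the choice ``c = (1, 2, 3)`` is illustrative:

    import numpy as np
    from numpy.polynomial import legendre as L

    c = (1, 2, 3)
    ci = L.legint(c)                      # one integration, constant k defaults to 0
    print(np.allclose(L.legder(ci), c))   # True: legder undoes legint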
In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*x*(2*nd - 1))/nd + return c0 + c1*x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array, a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Legendre series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + legval, leggrid2d, legval3d, leggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(legval, c, x, y) + + +def leggrid2d(x, y, c): + """ + Evaluate a 2-D Legendre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Legendre series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + legval, legval2d, legval3d, leggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(legval, c, x, y) + + +def legval3d(x, y, z, c): + """ + Evaluate a 3-D Legendre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and, if it isn't an + ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``.
If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + legval, legval2d, leggrid2d, leggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(legval, c, x, y, z) + + +def leggrid3d(x, y, z, c): + """ + Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + multi-degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the Cartesian + product of `x`, `y`, and `z`. + + See Also + -------- + legval, legval2d, leggrid2d, legval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(legval, c, x, y, z) + + +def legvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Legendre polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and + ``legval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Legendre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Legendre polynomial. The dtype will be the same as + the converted `x`.
+ + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. This is not as accurate + # as reverse recursion in this application but it is more efficient. + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i + return np.moveaxis(v, 0, -1) + + +def legvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Legendre polynomials. + + If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Legendre + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((legvander, legvander), (x, y), deg) + + +def legvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Legendre polynomials. + + If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Legendre + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. 
Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. 
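Since the Examples section of `legfit` below is left empty in this file, a minimal hedged sketch; the test function, degree, and tolerance are illustrative choices:

    import numpy as np
    from numpy.polynomial import legendre as L

    x = np.linspace(-1, 1, 51)
    y = np.exp(-x**2)                     # smooth, arbitrary test data
    coef = L.legfit(x, y, 4)              # degree-4 least-squares fit
    resid = np.max(np.abs(L.legval(x, coef) - y))
    print(resid < 1e-2)                   # True for this well-conditioned fit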
The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(legvander, x, y, deg, rcond, full, w) + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1./np.sqrt(2*np.arange(n) + 1) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. 
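A quick round-trip sketch for `legroots` as defined here, paired with `legfromroots`; the root values are illustrative:

    import numpy as np
    from numpy.polynomial import legendre as L

    coef = L.legfromroots([-0.5, 0.0, 0.5])
    print(L.legroots(coef))     # array([-0.5,  0. ,  0.5]) up to roundoff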
+ + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. + + The Legendre series basis polynomials aren't powers of ``x`` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = legcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = legcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. 
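A small sketch using `leggauss` above to confirm orthogonality and normalization of the basis; the degree choices are illustrative, and ``2/(2n + 1)`` is the standard Legendre normalization:

    import numpy as np
    from numpy.polynomial import legendre as L

    x, w = L.leggauss(5)                  # exact for degree <= 9 on [-1, 1]
    P2 = L.legval(x, [0, 0, 1])
    P3 = L.legval(x, [0, 0, 0, 1])
    print(np.isclose(np.sum(w * P2 * P3), 0))     # True: orthogonality
    print(np.isclose(np.sum(w * P2 * P2), 2/5))   # True: 2/(2n + 1) with n = 2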
+ + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = x*0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/phivenv/Lib/site-packages/numpy/polynomial/legendre.pyi b/phivenv/Lib/site-packages/numpy/polynomial/legendre.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e2ff7eb92d22ad48a045d4fd1b85df6b7e440a8a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/legendre.pyi @@ -0,0 +1,47 @@ +from typing import Any + +from numpy import int_ +from numpy.typing import NDArray +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +legtrim = trimcoef + +def poly2leg(pol): ... +def leg2poly(c): ... + +legdomain: NDArray[int_] +legzero: NDArray[int_] +legone: NDArray[int_] +legx: NDArray[int_] + +def legline(off, scl): ... +def legfromroots(roots): ... +def legadd(c1, c2): ... +def legsub(c1, c2): ... +def legmulx(c): ... +def legmul(c1, c2): ... +def legdiv(c1, c2): ... +def legpow(c, pow, maxpower=...): ... +def legder(c, m=..., scl=..., axis=...): ... +def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def legval(x, c, tensor=...): ... +def legval2d(x, y, c): ... +def leggrid2d(x, y, c): ... +def legval3d(x, y, z, c): ... +def leggrid3d(x, y, z, c): ... +def legvander(x, deg): ... +def legvander2d(x, y, deg): ... +def legvander3d(x, y, z, deg): ... +def legfit(x, y, deg, rcond=..., full=..., w=...): ... +def legcompanion(c): ... +def legroots(c): ... +def leggauss(deg): ... +def legweight(x): ... 
+ +class Legendre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/phivenv/Lib/site-packages/numpy/polynomial/polynomial.py b/phivenv/Lib/site-packages/numpy/polynomial/polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..5f5bcc5a3e8893a02d108d73ad16f72c63b528a0 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/polynomial.py @@ -0,0 +1,1655 @@ +""" +================================================= +Power Series (:mod:`numpy.polynomial.polynomial`) +================================================= + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Polynomial + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + polydomain + polyzero + polyone + polyx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + polyadd + polysub + polymulx + polymul + polydiv + polypow + polyval + polyval2d + polyval3d + polygrid2d + polygrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + polyder + polyint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + polyfromroots + polyroots + polyvalfromroots + polyvander + polyvander2d + polyvander3d + polycompanion + polyfit + polytrim + polyline + +See Also +-------- +`numpy.polynomial` + +""" +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', + 'polycompanion'] + +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1., 1.]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. 
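Looking back at the `Legendre` class defined earlier, a minimal sketch of its domain/window mapping via the inherited `fit` constructor; the data values are illustrative:

    import numpy as np
    from numpy.polynomial import Legendre

    x = np.linspace(0, 10, 30)
    y = x**2 + 1
    p = Legendre.fit(x, y, 2)        # data on [0, 10] mapped into window [-1, 1]
    print(p.domain, p.window)        # [ 0. 10.] [-1.  1.]
    print(np.allclose(p(x), y))      # True: a quadratic is fit exactly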
+ + See Also + -------- + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1, -1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1, -1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. + + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + where the :math:`r_n` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). + + See Also + -------- + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form ``(x - r_i)``, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that ``1`` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([1.+0.j, 0.+0.j, 1.+0.j]) + + """ + return pu._fromroots(polyline, polymul, roots) + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> sum = P.polyadd(c1,c2); sum + array([4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + return pu._add(c1, c2) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. 
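A short sketch of `polyfromroots` above with a repeated root; the root multiset is illustrative and the expanded coefficients are easy to verify by hand:

    import numpy as np
    from numpy.polynomial import polynomial as P

    c = P.polyfromroots((2, 2, 3))     # (x - 2)**2 * (x - 3), monic
    print(c)                           # array([-12.,  16.,  -7.,   1.])
    print(P.polyval([2, 3], c))        # [0., 0.]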
+ + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2, c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Notes + ----- + + .. versionadded:: 1.5.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polymulx(c) + array([0., 1., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polymulx, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polymul(c1, c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. 
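+ + An added illustrative example: an exact division leaves a zero remainder. + + >>> from numpy.polynomial import polynomial as P + >>> P.polydiv([-1, 0, 1], [-1, 1]) # (x**2 - 1) / (x - 1) + (array([1., 1.]), array([0.]))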
+ + See Also + -------- + polyadd, polysub, polymulx, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polydiv(c1, c2) + (array([3.]), array([-8., -4.])) + >>> P.polydiv(c2, c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + dlen = lc1 - lc2 + scl = c2[-1] + c2 = c2[:-1]/scl + i = dlen + j = lc1 - 1 + while i >= 0: + c1[i:j] -= c2*c1[j] + i -= 1 + j -= 1 + return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is None, i.e. no limit. + + Returns + ------- + coef : ndarray + Power series of `c` raised to the power `pow`. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polydiv + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1, 2, 3], 2) + array([ 1., 4., 10., 12., 9.]) + + """ + # note: this is more efficient than `pu._pow(polymul, c, pow, maxpower)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axes correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative.
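+ + For a 2-D coefficient array the `axis` keyword selects the variable that is differentiated (an added illustration): + + >>> from numpy.polynomial import polynomial as P + >>> P.polyder([[1., 2.], [3., 4.]], axis=0) # d/dx of 1 + 2*y + 3*x + 4*x*y + array([[3., 4.]]) + >>> P.polyder([[1., 2.], [3., 4.]], axis=1) # d/dy + array([[2.], + [4.]])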
+ + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3, 4) + >>> P.polyder(c) # (d/dx)(c) + array([ 2., 6., 12.]) + >>> P.polyder(c, 3) # (d**3/dx**3)(c) + array([24.]) + >>> P.polyder(c, scl=-1) # (d/d(-x))(c) + array([ -2., -6., -12.]) + >>> P.polyder(c, 2, -1) # (d**2/d(-x)**2)(c) + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients, from low to high degree along each axis, e.g., [1,2,3] + represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] + represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients, ordered from low to high. + m : int, optional + Order of integration, must be non-negative. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + polyder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. Why + is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polyint(c) # should return array([0, 1, 1, 1]) + array([0., 1., 1., 1.]) + >>> P.polyint(c, 3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) + array([ 0. , 0. , 0. 
, 0.16666667, 0.08333333, # may vary + 0.05 ]) + >>> P.polyint(c, k=3) # should return array([3, 1, 1, 1]) + array([3., 1., 1., 1.]) + >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) + array([6., 1., 1., 1.]) + >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) + array([ 0., -2., -2., -2.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype doesn't preserve mask attribute. + c = c + 0.0 + cdt = c.dtype + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + k = list(k) + [0]*(cnt - len(k)) + c = np.moveaxis(c, iaxis, 0) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) + tmp[0] = c[0]*0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j]/(j + 1) + tmp[0] += k[i] - polyval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyval(x, c, tensor=True): + """ + Evaluate a polynomial at points x. + + If `c` is of length ``n + 1``, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above.
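+ + An added illustrative example: trailing zero coefficients do not change the value, they only cost extra work. + + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(2, [1, 2, 3, 0]) + 17.0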
+ + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1, 2, 3]) + array([[ 1., 6.], + [17., 34.]]) + >>> coef = np.arange(4).reshape(2, 2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1, 2], coef, tensor=True) + array([[2., 4.], + [4., 7.]]) + >>> polyval([1, 2], coef, tensor=False) + array([2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + c0 = c[-1] + x*0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0*x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length ``N``, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. Note that + scalars have shape (,). + + .. versionadded:: 1.12 + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. + tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above.
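+ + An added illustrative example with a scalar `x` and two roots: + + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(0.5, [1, 2]) # (0.5 - 1)*(0.5 - 2) + 0.75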
+ + See Also + -------- + polyroots, polyfromroots, polyval + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(1, [1, 2, 3]) + 0.0 + >>> a = np.arange(4).reshape(2, 2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyvalfromroots(a, [-1, 0, 1]) + array([[-0., 0.], + [ 6., 24.]]) + >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients + >>> r # each column of r defines one polynomial + array([[-2, -1], + [ 0, 1]]) + >>> b = [-2, 1] + >>> polyvalfromroots(b, r, tensor=True) + array([[-0., 3.], + [ 3., 0.]]) + >>> polyvalfromroots(b, r, tensor=False) + array([-0., 0.]) + + """ + r = np.array(r, ndmin=1, copy=None) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray): + if tensor: + r = r.reshape(r.shape + (1,)*x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return np.prod(x - r, axis=0) + + +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + polyval, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6)) + >>> P.polyval2d(1, 1, c) + 21.0 + + """ + return pu._valnd(polyval, c, x, y) + + +def polygrid2d(x, y, c): + """ + Evaluate a 2-D polynomial on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D.
The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6)) + >>> P.polygrid2d([0, 1], [0, 1], c) + array([[ 1., 6.], + [ 5., 21.]]) + + """ + return pu._gridnd(polyval, c, x, y) + + +def polyval3d(x, y, z, c): + """ + Evaluate a 3-D polynomial at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) + >>> P.polyval3d(1, 1, 1, c) + 45.0 + + """ + return pu._valnd(polyval, c, x, y, z) + + +def polygrid3d(x, y, z, c): + """ + Evaluate a 3-D polynomial on the Cartesian product of x, y and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars.
In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the Cartesian + product of `x`, `y`, and `z`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polyval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) + >>> P.polygrid3d([0, 1], [0, 1], [0, 1], c) + array([[ 1., 13.], + [ 6., 51.]]) + + """ + return pu._gridnd(polyval, c, x, y, z) + + +def polyvander(x, deg): + """Vandermonde matrix of given degree. + + Returns the Vandermonde matrix of degree `deg` and sample points + `x`. The Vandermonde matrix is defined by + + .. math:: V[..., i] = x^i, + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the power of `x`. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and + ``polyval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of polynomials of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray. + The Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the power of `x`. + The dtype will be the same as the converted `x`. + + See Also + -------- + polyvander2d, polyvander3d + + Examples + -------- + The Vandermonde matrix of degree ``deg = 5`` and sample points + ``x = [-1, 2, 3]`` contains the element-wise powers of `x` + from 0 to 5 as its columns. + + >>> from numpy.polynomial import polynomial as P + >>> x, deg = [-1, 2, 3], 5 + >>> P.polyvander(x=x, deg=deg) + array([[ 1., -1., 1., -1., 1., -1.], + [ 1., 2., 4., 8., 16., 32.], + [ 1., 3., 9., 27., 81., 243.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i-1]*x + return np.moveaxis(v, 0, -1) + + +def polyvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees.
+ + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j, + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the powers of + `x` and `y`. + + If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D polynomials + of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + polyvander, polyvander3d, polyval2d, polyval3d + + Examples + -------- + The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample + points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows: + + >>> from numpy.polynomial import polynomial as P + >>> x = np.array([-1, 2]) + >>> y = np.array([1, 3]) + >>> m, n = 1, 2 + >>> deg = np.array([m, n]) + >>> V = P.polyvander2d(x=x, y=y, deg=deg) + >>> V + array([[ 1., 1., 1., -1., -1., -1.], + [ 1., 3., 9., 2., 6., 18.]]) + + We can verify the columns for any ``0 <= i <= m`` and ``0 <= j <= n``: + + >>> i, j = 0, 1 + >>> V[:, (deg[1]+1)*i + j] == x**i * y**j + array([ True, True]) + + The (1D) Vandermonde matrix of sample points ``x`` and degree ``m`` is a + special case of the (2D) pseudo-Vandermonde matrix with ``y`` points all + zero and degree ``[m, 0]``. + + >>> P.polyvander2d(x=x, y=0*x, deg=(m, 0)) == P.polyvander(x=x, deg=m) + array([[ True, True], + [ True, True]]) + + """ + return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg) + + +def polyvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k, + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the powers of `x`, `y`, and `z`. + + If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D polynomials + of the same degrees and sample points.
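+ + An added illustrative check of this identity: + + >>> from numpy.polynomial import polynomial as P + >>> x = y = z = np.array([0.5, 2.0]) + >>> c = np.arange(8.0).reshape(2, 2, 2) + >>> V = P.polyvander3d(x, y, z, [1, 1, 1]) + >>> np.allclose(np.dot(V, c.flat), P.polyval3d(x, y, z, c)) + True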
+ + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + polyvander, polyvander2d, polyval2d, polyval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> x = np.asarray([-1, 2, 1]) + >>> y = np.asarray([1, -2, -3]) + >>> z = np.asarray([2, 2, 5]) + >>> l, m, n = [2, 2, 1] + >>> deg = [l, m, n] + >>> V = P.polyvander3d(x=x, y=y, z=z, deg=deg) + >>> V + array([[ 1., 2., 1., 2., 1., 2., -1., -2., -1., + -2., -1., -2., 1., 2., 1., 2., 1., 2.], + [ 1., 2., -2., -4., 4., 8., 2., 4., -4., + -8., 8., 16., 4., 8., -8., -16., 16., 32.], + [ 1., 5., -3., -15., 9., 45., 1., 5., -3., + -15., 9., 45., 1., 5., -3., -15., 9., 45.]]) + + We can verify the columns for any ``0 <= i <= l``, ``0 <= j <= m``, + and ``0 <= k <= n`` + + >>> i, j, k = 2, 1, 0 + >>> V[:, (m+1)*(n+1)*i + (n+1)*j + k] == x**i * y**j * z**k + array([ True, True, True]) + + """ + return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg) + + +def polyfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least-squares fit of a polynomial to data. + + Return the coefficients of a polynomial of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (`M`,) + x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. + y : array_like, shape (`M`,) or (`M`, `K`) + y-coordinates of the sample points. Several sets of sample points + sharing the same x-coordinates can be (independently) fit with one + call to `polyfit` by passing in for `y` a 2-D array that contains + one data set per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than `rcond`, relative to the largest singular value, will be + ignored. The default value is ``len(x)*eps``, where `eps` is the + relative precision of the platform's float type, about 2e-16 in + most cases. + full : bool, optional + Switch determining the nature of the return value. When ``False`` + (the default) just the coefficients are returned; when ``True``, + diagnostic information from the singular value decomposition (used + to solve the fit's matrix equation) is also returned. + w : array_like, shape (`M`,), optional + Weights.
If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) + Polynomial coefficients ordered from low to high. If `y` was 2-D, + the coefficients in column `k` of `coef` represent the polynomial + fit to the data in `y`'s `k`-th column. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Raises + ------ + RankWarning + Raised if the matrix in the least-squares fit is rank deficient. + The warning is only raised if ``full == False``. The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `~exceptions.RankWarning` will be + raised. This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. 
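+ + An added illustrative example: data that are exactly polynomial are recovered up to roundoff. + + >>> from numpy.polynomial import polynomial as P + >>> c = P.polyfit([0, 1, 2, 3], [1, 2, 5, 10], 2) # y = 1 + x**2 + >>> np.allclose(c, [1, 0, 1]) + True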
+ + Examples + -------- + >>> np.random.seed(123) + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 + array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary + >>> stats # note the large SSR, explaining the rather poor results + [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary + 0.28853036]), 1.1324274851176597e-014] + + Same thing without the added noise + + >>> y = x**3 - x + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 + array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) + >>> stats # note the minuscule SSR + [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary + 0.50443316, 0.28853036]), 1.1324274851176597e-014] + + """ + return pu._fit(polyvander, x, y, deg, rcond, full, w) + + +def polycompanion(c): + """ + Return the companion matrix of c. + + The companion matrix for power series cannot be made symmetric by + scaling the basis, so this function differs from those for the + orthogonal polynomials. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polycompanion(c) + array([[ 0. , -0.33333333], + [ 1. , -0.66666667]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + bot = mat.reshape(-1)[n::n+1] + bot[...] = 1 + mat[:, -1] -= c[:-1]/c[-1] + return mat + + +def polyroots(c): + """ + Compute the roots of a polynomial. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * x^i. + + Parameters + ---------- + c : 1-D array_like + 1-D array of polynomial coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the polynomial. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix. Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the power series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method.
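+ + A single Newton refinement step can be written with this module's own functions (an added sketch): + + >>> import numpy.polynomial.polynomial as poly + >>> c = poly.polyfromroots((-1, 0, 1)) + >>> r = poly.polyroots(c) + >>> r = r - poly.polyval(r, c)/poly.polyval(r, poly.polyder(c)) + >>> np.allclose(r, [-1, 0, 1]) + True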
+ + Examples + -------- + >>> import numpy.polynomial.polynomial as poly + >>> poly.polyroots(poly.polyfromroots((-1,0,1))) + array([-1., 0., 1.]) + >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype + dtype('float64') + >>> j = complex(0,1) + >>> poly.polyroots(poly.polyfromroots((-j,0,j))) + array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary + + """ # noqa: E501 + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = polycompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +# +# polynomial class +# + +class Polynomial(ABCPolyBase): + """A power series class. + + The Polynomial class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Polynomial coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(polyadd) + _sub = staticmethod(polysub) + _mul = staticmethod(polymul) + _div = staticmethod(polydiv) + _pow = staticmethod(polypow) + _val = staticmethod(polyval) + _int = staticmethod(polyint) + _der = staticmethod(polyder) + _fit = staticmethod(polyfit) + _line = staticmethod(polyline) + _roots = staticmethod(polyroots) + _fromroots = staticmethod(polyfromroots) + + # Virtual properties + domain = np.array(polydomain) + window = np.array(polydomain) + basis_name = None + + @classmethod + def _str_term_unicode(cls, i, arg_str): + if i == '1': + return f"·{arg_str}" + else: + return f"·{arg_str}{i.translate(cls._superscript_mapping)}" + + @staticmethod + def _str_term_ascii(i, arg_str): + if i == '1': + return f" {arg_str}" + else: + return f" {arg_str}**{i}" + + @staticmethod + def _repr_latex_term(i, arg_str, needs_parens): + if needs_parens: + arg_str = rf"\left({arg_str}\right)" + if i == 0: + return '1' + elif i == 1: + return arg_str + else: + return f"{arg_str}^{{{i}}}" diff --git a/phivenv/Lib/site-packages/numpy/polynomial/polynomial.pyi b/phivenv/Lib/site-packages/numpy/polynomial/polynomial.pyi new file mode 100644 index 0000000000000000000000000000000000000000..729321ad4885ae86afac455fab56781838bd54d3 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/polynomial.pyi @@ -0,0 +1,42 @@ +from typing import Any + +from numpy import int_ +from numpy.typing import NDArray +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +polytrim = trimcoef + +polydomain: NDArray[int_] +polyzero: NDArray[int_] +polyone: NDArray[int_] +polyx: NDArray[int_] + +def polyline(off, scl): ... +def polyfromroots(roots): ... +def polyadd(c1, c2): ... +def polysub(c1, c2): ... +def polymulx(c): ... 
+def polymul(c1, c2): ... +def polydiv(c1, c2): ... +def polypow(c, pow, maxpower=...): ... +def polyder(c, m=..., scl=..., axis=...): ... +def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ... +def polyval(x, c, tensor=...): ... +def polyvalfromroots(x, r, tensor=...): ... +def polyval2d(x, y, c): ... +def polygrid2d(x, y, c): ... +def polyval3d(x, y, z, c): ... +def polygrid3d(x, y, z, c): ... +def polyvander(x, deg): ... +def polyvander2d(x, y, deg): ... +def polyvander3d(x, y, z, deg): ... +def polyfit(x, y, deg, rcond=..., full=..., w=...): ... +def polyroots(c): ... + +class Polynomial(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/phivenv/Lib/site-packages/numpy/polynomial/polyutils.py b/phivenv/Lib/site-packages/numpy/polynomial/polyutils.py new file mode 100644 index 0000000000000000000000000000000000000000..7a3c629d39850b5c08c75a9e3fda02c57823feee --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/polyutils.py @@ -0,0 +1,753 @@ +""" +Utility classes and functions for the polynomial modules. + +This module provides: error and warning objects; a polynomial base class; +and some routines used in both the `polynomial` and `chebyshev` modules. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + as_series convert list of array_likes into 1-D arrays of common type. + trimseq remove trailing zeros. + trimcoef remove small trailing coefficients. + getdomain return the domain appropriate for a given set of abscissae. + mapdomain maps points between domains. + mapparms parameters of the linear map between domains. + +""" +import operator +import functools +import warnings + +import numpy as np + +from numpy._core.multiarray import dragon4_positional, dragon4_scientific +from numpy.exceptions import RankWarning + +__all__ = [ + 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', + 'format_float'] + +# +# Helper functions to convert inputs to 1-D arrays +# +def trimseq(seq): + """Remove trailing zeros from a sequence of Poly series coefficients. + + Parameters + ---------- + seq : sequence + Sequence of Poly series coefficients. + + Returns + ------- + series : sequence + Subsequence with trailing zeros removed. If the resulting sequence + would be empty, return the first element. The returned sequence may + or may not be a view. + + Notes + ----- + Do not lose the type info if the sequence contains unknown objects. + + """ + if len(seq) == 0 or seq[-1] != 0: + return seq + else: + for i in range(len(seq) - 1, -1, -1): + if seq[i] != 0: + break + return seq[:i+1] + + +def as_series(alist, trim=True): + """ + Return argument as a list of 1-d arrays. + + The returned list contains array(s) of dtype double, complex double, or + object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of + size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays + of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array + raises a ValueError if it is not first reshaped into either a 1-d or 2-d + array. + + Parameters + ---------- + alist : array_like + A 1- or 2-d array_like + trim : boolean, optional + When True, trailing zeros are removed from the inputs. + When False, the inputs are passed through intact. + + Returns + ------- + [a1, a2,...] : list of 1-D arrays + A copy of the input data as a list of 1-d arrays. + + Raises + ------ + ValueError + Raised when `as_series` cannot convert its input to 1-d arrays, or at + least one of the resulting arrays is empty.
+ + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> a = np.arange(4) + >>> pu.as_series(a) + [array([0.]), array([1.]), array([2.]), array([3.])] + >>> b = np.arange(6).reshape((2,3)) + >>> pu.as_series(b) + [array([0., 1., 2.]), array([3., 4., 5.])] + + >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) + [array([1.]), array([0., 1., 2.]), array([0., 1.])] + + >>> pu.as_series([2, [1.1, 0.]]) + [array([2.]), array([1.1])] + + >>> pu.as_series([2, [1.1, 0.]], trim=False) + [array([2.]), array([1.1, 0. ])] + + """ + arrays = [np.array(a, ndmin=1, copy=None) for a in alist] + for a in arrays: + if a.size == 0: + raise ValueError("Coefficient array is empty") + if any(a.ndim != 1 for a in arrays): + raise ValueError("Coefficient array is not 1-d") + if trim: + arrays = [trimseq(a) for a in arrays] + + if any(a.dtype == np.dtype(object) for a in arrays): + ret = [] + for a in arrays: + if a.dtype != np.dtype(object): + tmp = np.empty(len(a), dtype=np.dtype(object)) + tmp[:] = a[:] + ret.append(tmp) + else: + ret.append(a.copy()) + else: + try: + dtype = np.common_type(*arrays) + except Exception as e: + raise ValueError("Coefficient arrays have no common type") from e + ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """ + Remove "small" "trailing" coefficients from a polynomial. + + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([0.0003+0.j , 0.001 -0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1]*0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. 
+ + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (-0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1-0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1]*new[0] - old[0]*new[1])/oldlen + scl = newlen/oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math:: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math:: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary + 6.28318531]) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) + array([0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. 
+1.j ]) + >>> new_z = pu.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary + + """ + x = np.asanyarray(x) + off, scl = mapparms(old, new) + return off + scl*x + + +def _nth_slice(i, ndim): + sl = [np.newaxis] * ndim + sl[i] = slice(None) + return tuple(sl) + + +def _vander_nd(vander_fs, points, degrees): + r""" + A generalization of the Vandermonde matrix for N dimensions + + The result is built by combining the results of 1d Vandermonde matrices, + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]} + + where + + .. math:: + N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\ + M &= \texttt{points[k].ndim} \\ + V_k &= \texttt{vander\_fs[k]} \\ + x_k &= \texttt{points[k]} \\ + 0 \le j_k &\le \texttt{degrees[k]} + + Expanding the one-dimensional :math:`V_k` functions gives: + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])} + + where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along + dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`. + + Parameters + ---------- + vander_fs : Sequence[function(array_like, int) -> ndarray] + The 1d vander function to use for each axis, such as ``polyvander`` + points : Sequence[array_like] + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + This must be the same length as `vander_fs`. + degrees : Sequence[int] + The maximum degree (inclusive) to use for each axis. + This must be the same length as `vander_fs`. + + Returns + ------- + vander_nd : ndarray + An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``. + """ + n_dims = len(vander_fs) + if n_dims != len(points): + raise ValueError( + f"Expected {n_dims} dimensions of sample points, got {len(points)}") + if n_dims != len(degrees): + raise ValueError( + f"Expected {n_dims} dimensions of degrees, got {len(degrees)}") + if n_dims == 0: + raise ValueError("Unable to guess a dtype or shape when no points are given") + + # convert to the same shape and type + points = tuple(np.asarray(tuple(points)) + 0.0) + + # produce the vandermonde matrix for each dimension, placing the last + # axis of each in an independent trailing axis of the output + vander_arrays = ( + vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)] + for i in range(n_dims) + ) + + # we checked this wasn't empty already, so no `initial` needed + return functools.reduce(operator.mul, vander_arrays) + + +def _vander_nd_flat(vander_fs, points, degrees): + """ + Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis + + Used to implement the public ``<type>vander<n>d`` functions. + """ + v = _vander_nd(vander_fs, points, degrees) + return v.reshape(v.shape[:-len(degrees)] + (-1,)) + + +def _fromroots(line_f, mul_f, roots): + """ + Helper function used to implement the ``<type>fromroots`` functions.
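+ + The linear factors are combined pairwise, halving the list of partial products on each pass, so the two operands of every ``mul_f`` call stay balanced in degree (note added for clarity).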
+ + Parameters + ---------- + line_f : function(float, float) -> ndarray + The ``<type>line`` function, such as ``polyline`` + mul_f : function(array_like, array_like) -> ndarray + The ``<type>mul`` function, such as ``polymul`` + roots + See the ``<type>fromroots`` functions for more detail + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = as_series([roots], trim=False) + roots.sort() + p = [line_f(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [mul_f(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = mul_f(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def _valnd(val_f, c, *args): + """ + Helper function used to implement the ``<type>val<n>d`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``<type>val`` function, such as ``polyval`` + c, args + See the ``<type>val<n>d`` functions for more detail + """ + args = [np.asanyarray(a) for a in args] + shape0 = args[0].shape + if not all((a.shape == shape0 for a in args[1:])): + if len(args) == 3: + raise ValueError('x, y, z are incompatible') + elif len(args) == 2: + raise ValueError('x, y are incompatible') + else: + raise ValueError('ordinates are incompatible') + it = iter(args) + x0 = next(it) + + # use tensor on only the first + c = val_f(x0, c) + for xi in it: + c = val_f(xi, c, tensor=False) + return c + + +def _gridnd(val_f, c, *args): + """ + Helper function used to implement the ``<type>grid<n>d`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``<type>val`` function, such as ``polyval`` + c, args + See the ``<type>grid<n>d`` functions for more detail + """ + for xi in args: + c = val_f(xi, c) + return c + + +def _div(mul_f, c1, c2): + """ + Helper function used to implement the ``<type>div`` functions. + + Implementation uses repeated subtraction of c2 multiplied by the nth basis. + For some polynomial types, a more efficient approach may be possible. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> array_like + The ``<type>mul`` function, such as ``polymul`` + c1, c2 + See the ``<type>div`` functions for more detail + """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = mul_f([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, trimseq(rem) + + +def _add(c1, c2): + """ Helper function used to implement the ``<type>add`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _sub(c1, c2): + """ Helper function used to implement the ``<type>sub`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): + """ + Helper function used to implement the ``<type>fit`` functions.
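+ + The columns of the design matrix are scaled by their Euclidean norms before the call to `lstsq` and the solution is rescaled afterwards, which improves conditioning without changing the fitted coefficients (note added for clarity).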
+ + Parameters + ---------- + vander_f : function(array_like, int) -> ndarray + The 1d vander function, such as ``polyvander`` + x, y, deg, rcond, full, w + See the ``fit`` functions for more detail + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = vander_f(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = vander_f(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def _pow(mul_f, c, pow, maxpower): + """ + Helper function used to implement the ``pow`` functions. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> ndarray + The ``mul`` function, such as ``polymul`` + c : array_like + 1-D array of series coefficients + pow, maxpower + See the ``pow`` functions for more detail + """ + # c is a trimmed copy + [c] = as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way.
+ prd = c + for i in range(2, power + 1): + prd = mul_f(prd, c) + return prd + + +def _as_int(x, desc): + """ + Like `operator.index`, but emits a custom exception when passed an + incorrect type + + Parameters + ---------- + x : int-like + Value to interpret as an integer + desc : str + description to include in any error message + + Raises + ------ + TypeError : if x is a float or non-numeric + """ + try: + return operator.index(x) + except TypeError as e: + raise TypeError(f"{desc} must be an integer, received {x}") from e + + +def format_float(x, parens=False): + if not np.issubdtype(type(x), np.floating): + return str(x) + + opts = np.get_printoptions() + + if np.isnan(x): + return opts['nanstr'] + elif np.isinf(x): + return opts['infstr'] + + exp_format = False + if x != 0: + a = np.abs(x) + if a >= 1.e8 or a < 10**min(0, -(opts['precision']-1)//2): + exp_format = True + + trim, unique = '0', True + if opts['floatmode'] == 'fixed': + trim, unique = 'k', False + + if exp_format: + s = dragon4_scientific(x, precision=opts['precision'], + unique=unique, trim=trim, + sign=opts['sign'] == '+') + if parens: + s = '(' + s + ')' + else: + s = dragon4_positional(x, precision=opts['precision'], + fractional=True, + unique=unique, trim=trim, + sign=opts['sign'] == '+') + return s diff --git a/phivenv/Lib/site-packages/numpy/polynomial/polyutils.pyi b/phivenv/Lib/site-packages/numpy/polynomial/polyutils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2c4d6d19322a2284ea91b659cdd8b92b5dbafabc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/polyutils.pyi @@ -0,0 +1,9 @@ +__all__: list[str] + +def trimseq(seq): ... +def as_series(alist, trim=...): ... +def trimcoef(c, tol=...): ... +def getdomain(x): ... +def mapparms(old, new): ... +def mapdomain(x, old, new): ... +def format_float(x, parens=...): ... 
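Editorial aside (not part of the vendored diff): the private polyutils helpers above back the public `numpy.polynomial` API, and their behavior is easiest to see through that public surface. A minimal sketch, assuming only a standard NumPy installation; the sample values are illustrative:

import numpy as np
from numpy.polynomial import polynomial as P
from numpy.polynomial import polyutils as pu

# mapdomain applies the affine map off + scl*x computed by mapparms,
# sending old[0] -> new[0] and old[1] -> new[1].
x = np.linspace(-1, 1, 5)
print(pu.mapdomain(x, [-1, 1], [0, 4]))        # [0. 1. 2. 3. 4.]

# _fromroots builds a monic series by pairwise multiplication of linear
# factors; its public counterpart for power series is polyfromroots.
c = P.polyfromroots([1, 2, 3])                 # (x-1)(x-2)(x-3)
print(c)                                       # [-6. 11. -6.  1.]

# _div performs divmod by repeatedly subtracting shifted multiples of the
# divisor; publicly, polydiv.  Multiplying then dividing round-trips.
quo, rem = P.polydiv(P.polymul(c, [1, 1]), c)
print(quo, rem)                                # [1. 1.] [0.]

# _vander_nd_flat flattens the trailing degree axes, so for degrees (1, 2)
# polyvander2d produces (1+1)*(2+1) = 6 columns per sample point.
pts = np.array([0.0, 0.5, 1.0])
print(P.polyvander2d(pts, pts, [1, 2]).shape)  # (3, 6)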
diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__init__.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a081c3d7daa1f360ccc2701727967e2060eef14 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c1ccf88431cd831d6fa8c8ed825e13aecebb43e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295a2e97f9238932bd181f8b0dc26081176b3c59 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3781cd71273aece7749ad49b3a2510451beb4fd8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7d92f264409c01d40c07901f7f7d405ff16cee0 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e3259eb0541201b2da7bcc2205079826ba748dc Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a813f60d6afa3d72236edc62c13f4225d2f4d5d8 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ba353a570ced9c41d609c81a086c59b5c5d9455e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70e49268bfdddb8554da0272fa19cea60db6483c Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17f44ac45a386ab724c67ec98af389f161685c4e Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-39.pyc b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18a1b74e357e320b9f8b15c69ee7a7fdf215bf94 Binary files /dev/null and b/phivenv/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_symbol.cpython-39.pyc differ diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_chebyshev.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f8b96f83360e101f61912832e4b2cda8b0a320 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,619 @@ +"""Tests for chebyshev module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.chebyshev as cheb +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +def trim(x): + return cheb.chebtrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestPrivate: + + def test__cseries_to_zseries(self): + for i in range(5): + inp = np.array([2] + [1]*i, np.double) + tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + res = cheb._cseries_to_zseries(inp) + assert_equal(res, tgt) + + def test__zseries_to_cseries(self): + for i in range(5): + inp = np.array([.5]*i + [2] + [.5]*i, np.double) + tgt = np.array([2] + [1]*i, np.double) + res = cheb._zseries_to_cseries(inp) + assert_equal(res, tgt) + + +class TestConstants: + + def test_chebdomain(self): + assert_equal(cheb.chebdomain, [-1, 1]) + + def test_chebzero(self): + assert_equal(cheb.chebzero, [0]) + + def test_chebone(self): + assert_equal(cheb.chebone, [1]) + + def test_chebx(self): + assert_equal(cheb.chebx, [0, 1]) + + +class TestArithmetic: + + def test_chebadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - j)] += .5 + res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + #check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = cheb.chebval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + 
#check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_chebint(self): + # check exceptions + assert_raises(TypeError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, 
k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_chebder(self): + # check exceptions + assert_raises(TypeError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class 
TestFitting: + + def test_chebfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = cheb.chebfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = cheb.chebfit(x, y, 4) + assert_almost_equal(cheb.chebval(x, coef1), y) + coef2 = cheb.chebfit(x, y, [0, 2, 4]) + assert_almost_equal(cheb.chebval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) + assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) 
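+
+    # Editorial aside (not part of the upstream suite): chebinterpolate
+    # samples the function at the Chebyshev points of the first kind and
+    # returns the coefficients of the interpolating series; for smooth
+    # functions the error shrinks rapidly with degree, e.g.
+    # (illustrative, assuming numpy is imported as above):
+    #
+    #     c = cheb.chebinterpolate(np.sin, 7)
+    #     cheb.chebval(0.3, c)    # approximately np.sin(0.3), to ~1e-7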
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(0, 10): + for p in range(0, deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = [0]*i + [1] + res = cheb.chebfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + #test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + #test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + 
assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_classes.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..48f370ad5d407f5d7bafe730d695b7cbd9c058a6 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,607 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. + +""" +import operator as op +from numbers import Number + +import pytest +import numpy as np +from numpy.polynomial import ( + Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) +from numpy.exceptions import RankWarning + +# +# fixtures +# + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE + ) +classids = tuple(cls.__name__ for cls in classes) + +@pytest.fixture(params=classes, ids=classids) +def Poly(request): + return request.param + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = f"Result: {p1}\nTarget: {p2}" + raise AssertionError(msg) + + +# +# Test conversion methods that depend on combinations of two classes. +# + +Poly1 = Poly +Poly2 = Poly + + +def test_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def test_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# test methods that depend on one class +# + + +def test_identity(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def test_basis(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0]*5 + [1]) + + +def test_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. 
+ d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_bad_conditioned_fit(Poly): + + x = [0., 0., 1.] + y = [1., 2., 3.] + + # check RankWarning is raised + with pytest.warns(RankWarning) as record: + Poly.fit(x, y, 2) + assert record[0].message.args[0] == "The fit may be poorly conditioned" + + +def test_fit(Poly): + + def f(x): + return x*(x - 1)*(x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. + w = np.zeros_like(x) + z = y + random(y.shape)*.25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) 
+ p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. 
+ p1 = Poly([1,2,3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [tuple(), list(), dict(), bool(), np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def test_roots(Poly): + d = Poly.domain * 1.25 + .25 + w = Poly.window + tgt = np.linspace(d[0], d[1], 5) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + assert_almost_equal(res, tgt) + + 
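+# Editorial aside (not an upstream test): fromroots places the given
+# roots in domain coordinates and roots() maps back through the same
+# affine domain/window transform, so the pair round-trips, e.g.
+# (illustrative):
+#
+#     p = Chebyshev.fromroots([0.25, 0.75], domain=[0, 1])
+#     np.sort(p.roots())    # approximately [0.25, 0.75]
+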
+def test_degree(Poly): + p = Poly.basis(5) + assert_equal(p.degree(), 5) + + +def test_copy(Poly): + p1 = Poly.basis(5) + p2 = p1.copy() + assert_(p1 == p2) + assert_(p1 is not p2) + assert_(p1.coef is not p2.coef) + assert_(p1.domain is not p2.domain) + assert_(p1.window is not p2.window) + + +def test_integ(Poly): + P = Polynomial + # Check defaults + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + # Check with k + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ(k=1)) + p2 = P.cast(p0.integ(2, k=[1, 1])) + assert_poly_almost_equal(p1, P([1, 2, 3, 4])) + assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) + # Check with lbnd + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ(lbnd=1)) + p2 = P.cast(p0.integ(2, lbnd=1)) + assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) + assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) + # Check scaling + d = 2*Poly.domain + p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + + +def test_deriv(Poly): + # Check that the derivative is the inverse of integration. It is + # assumes that the integration has been checked elsewhere. + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p1 = Poly([1, 2, 3], domain=d, window=w) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + + +def test_linspace(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly([1, 2, 3], domain=d, window=w) + # check default domain + xtgt = np.linspace(d[0], d[1], 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + # check specified domain + xtgt = np.linspace(0, 2, 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20, domain=[0, 2]) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + + +def test_pow(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = Poly([1]) + tst = Poly([1, 2, 3]) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # check error for invalid powers + assert_raises(ValueError, op.pow, tgt, 1.5) + assert_raises(ValueError, op.pow, tgt, -1) + + +def test_call(Poly): + P = Polynomial + d = Poly.domain + x = np.linspace(d[0], d[1], 11) + + # Check defaults + p = Poly.cast(P([1, 2, 3])) + tgt = 1 + x*(2 + 3*x) + res = p(x) + assert_almost_equal(res, tgt) + + +def test_call_with_list(Poly): + p = Poly([1, 2, 3]) + x = [-1, 0, 2] + res = p(x) + assert_equal(res, p(np.array(x))) + + +def test_cutdeg(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.cutdeg, .5) + assert_raises(ValueError, p.cutdeg, -1) + assert_equal(len(p.cutdeg(3)), 3) + assert_equal(len(p.cutdeg(2)), 3) + assert_equal(len(p.cutdeg(1)), 2) + assert_equal(len(p.cutdeg(0)), 1) + + +def test_truncate(Poly): + p 
= Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2*d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) + + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(0, 10): + for t in range(0, deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_hermite.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 0000000000000000000000000000000000000000..c6cdc98a29b4b56a7d7d4a0b25f30fbf07221ab7 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,555 @@ +"""Tests for hermite module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + #check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herm.hermval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herm.hermval(x, [1]).shape, dims) + 
assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_hermint(self): + # check exceptions + assert_raises(TypeError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(TypeError, herm.hermint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in 
range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermder(self): + # check exceptions + assert_raises(TypeError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + 
assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) + assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herm.hermfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herm.hermfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + # + coef4 = herm.hermfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herm.hermfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) + assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herm.hermfit(x, y, 4) + assert_almost_equal(herm.hermval(x, coef1), y) + coef2 = herm.hermfit(x, y, [0, 2, 4]) + assert_almost_equal(herm.hermval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herm.hermcompanion, []) + assert_raises(ValueError, herm.hermcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herm.hermcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) + + +class TestGauss: + + def test_100(self): + x, w = herm.hermgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
+ v = herm.hermvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermfromroots(self): + res = herm.hermfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herm.hermfromroots(roots) + res = herm.hermval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herm.herm2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermroots(self): + assert_almost_equal(herm.hermroots([1]), []) + assert_almost_equal(herm.hermroots([1, 1]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herm.hermroots(herm.hermfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herm.hermtrim, coef, -1) + + # Test results + assert_equal(herm.hermtrim(coef), coef[:-1]) + assert_equal(herm.hermtrim(coef, 1), coef[:-3]) + assert_equal(herm.hermtrim(coef, 2), [0]) + + def test_hermline(self): + assert_equal(herm.hermline(3, 4), [3, 2]) + + def test_herm2poly(self): + for i in range(10): + assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) + + def test_poly2herm(self): + for i in range(10): + assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-x**2) + res = herm.hermweight(x) + assert_almost_equal(res, tgt) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_hermite_e.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_hermite_e.py new file mode 100644 index 0000000000000000000000000000000000000000..5b92f011bbcccf9c71bc767954b2cfa136e7db3a --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_hermite_e.py @@ -0,0 +1,556 @@ +"""Tests for hermite_e module. 
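+As a quick orientation (an illustrative sketch, using only functions
+exercised below), the basis is the probabilists' one, He_2(x) = x**2 - 1:
+
+>>> import numpy.polynomial.hermite_e as herme
+>>> herme.herme2poly([0, 0, 1])
+array([-1.,  0.,  1.])
+>>> float(herme.hermeval(2.0, [0, 0, 1]))
+3.0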
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + #check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herme.hermeval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herme.hermeval(x, 
[1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_hermeint(self): + # check exceptions + assert_raises(TypeError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermeder(self): + # check exceptions + assert_raises(TypeError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) 
+ assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermefit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
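+ # For reference, with the probabilists' weight exp(-x**2/2) the exact
+ # relation is sum(w * He_m(x) * He_n(x)) == factorial(n) * sqrt(2*pi) * delta_mn.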
+ v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2*np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5*x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 0000000000000000000000000000000000000000..b889f49a53b6888189bb3460294a7237b398d651 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,537 @@ +"""Tests for laguerre module. 
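+As a quick orientation (an illustrative sketch, using only functions
+exercised below), L_2(x) = (x**2 - 4*x + 2)/2 and L_n(0) == 1 for every n:
+
+>>> import numpy.polynomial.laguerre as lag
+>>> lag.lag2poly([0, 0, 1])
+array([ 1. , -2. ,  0.5])
+>>> float(lag.lagval(0.0, [0, 0, 1]))
+1.0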
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1])/1 +L1 = np.array([1, -1])/1 +L2 = np.array([2, -4, 1])/2 +L3 = np.array([6, -18, 9, -1])/6 +L4 = np.array([24, -96, 72, -16, 1])/24 +L5 = np.array([120, -600, 600, -200, 25, -1])/120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants: + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + #check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = f"At i={i}" + tgt = y[i] + res = lag.lagval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + 
assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_lagint(self): + # check exceptions + assert_raises(TypeError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(TypeError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in 
range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_lagder(self): + # check exceptions + assert_raises(TypeError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_lagfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) 
+ assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + # + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = lag.lagfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, lag.lagcompanion, []) + assert_raises(ValueError, lag.lagcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(lag.lagcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) + + +class TestGauss: + + def test_100(self): + x, w = lag.laggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
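+ # For reference, the Laguerre polynomials are already orthonormal under
+ # the weight exp(-x), sum(w * L_m(x) * L_n(x)) == delta_mn, so the
+ # normalization below mainly guards against rounding.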
+ v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_legendre.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 0000000000000000000000000000000000000000..93de5bb4cf86488b15983889058750f7b8fa1d33 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,568 @@ +"""Tests for legendre module. 
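+As a quick orientation (an illustrative sketch, using only functions
+exercised below), P_2(x) = (3*x**2 - 1)/2 and P_n(1) == 1 for every n:
+
+>>> import numpy.polynomial.legendre as leg
+>>> leg.leg2poly([0, 0, 1])
+array([-0.5,  0. ,  1.5])
+>>> float(leg.legval(1.0, [0, 0, 1]))
+1.0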
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3])/2 +L3 = np.array([0, -3, 0, 5])/2 +L4 = np.array([3, 0, -30, 0, 35])/8 +L5 = np.array([0, 15, 0, -70, 0, 63])/8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants: + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2*i + 1 + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c]*j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + #check empty input + assert_equal(leg.legval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = leg.legval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(leg.legval(x, [1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, 
dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_legint(self): + # check exceptions + assert_raises(TypeError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(TypeError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = 
leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + + +class TestDerivative: + + def test_legder(self): + # check exceptions + assert_raises(TypeError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_legvander_negdeg(self): + assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + + +class TestFitting: + + def test_legfit(self): + def 
f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) + assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, leg.legfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = leg.legfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + coef3 = leg.legfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + # + coef4 = leg.legfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # + coef2d = leg.legfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = leg.legfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) + assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = leg.legfit(x, y, 4) + assert_almost_equal(leg.legval(x, coef1), y) + coef2 = leg.legfit(x, y, [0, 2, 4]) + assert_almost_equal(leg.legval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, leg.legcompanion, []) + assert_raises(ValueError, leg.legcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(leg.legcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(leg.legcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = leg.leggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
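+ # For reference, the integral of P_m * P_n over [-1, 1] is
+ # 2/(2*n + 1) * delta_mn, so the unnormalized Gram matrix has diagonal
+ # 2/(2*n + 1) rather than 1.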
+ v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_polynomial.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..835bbd3fc8051e8d071913a6ef0eb41bd2cf62b2 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,629 @@ +"""Tests for polynomial module. 
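+As a quick orientation (an illustrative sketch, using only functions
+exercised below), coefficients are stored lowest order first, so
+[1, 2, 3] means 1 + 2*x + 3*x**2:
+
+>>> import numpy.polynomial.polynomial as poly
+>>> float(poly.polyval(2.0, [1, 2, 3]))
+17.0
+>>> poly.polyfromroots([1, 2])
+array([ 2., -3.,  1.])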
+ +""" +from functools import reduce +from fractions import Fraction +import numpy as np +import numpy.polynomial.polynomial as poly +import pickle +from copy import deepcopy +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + assert_array_equal, assert_raises_regex) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants: + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + def test_copy(self): + x = poly.Polynomial([1, 2, 3]) + y = deepcopy(x) + assert_equal(x, y) + + def test_pickle(self): + x = poly.Polynomial([1, 2, 3]) + y = pickle.loads(pickle.dumps(x)) + assert_equal(x, y) + +class TestArithmetic: + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
+ for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1, 2] + cj = [0]*j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c]*j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + +class TestFraction: + + def test_Fraction(self): + # assert we can use Polynomials with coefficients of object dtype + f = Fraction(2, 3) + one = Fraction(1, 1) + zero = Fraction(0, 1) + p = poly.Polynomial([f, f], domain=[zero, one], window=[zero, one]) + + x = 2 * p + p ** 2 + assert_equal(x.coef, np.array([Fraction(16, 9), Fraction(20, 9), + Fraction(4, 9)], dtype=object)) + assert_equal(p.domain, [zero, one]) + assert_equal(p.coef.dtype, np.dtypes.ObjectDType()) + assert_(isinstance(p(f), Fraction)) + assert_equal(p(f), Fraction(10, 9)) + p_deriv = poly.Polynomial([Fraction(2, 3)], domain=[zero, one], + window=[zero, one]) + assert_equal(p.deriv(), p_deriv) + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + #check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0]*i + [1]) + assert_almost_equal(res, tgt) + tgt = x*(x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + #check masked arrays are processed correctly + mask = [False, True, False] + mx = np.ma.array([1, 2, 3], mask=mask) + res = np.polyval([7, 5, 3], mx) + assert_array_equal(res.mask, mask) + + #check subtypes of ndarray are preserved + class C(np.ndarray): + pass + + cx = np.array([1, 2, 3]).view(C) + assert_equal(type(np.polyval([2, 3, 4], cx)), C) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0]*i) + assert_almost_equal(res, tgt) + tgt = x*(x - 1)*(x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) + 
assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + # check tensor=True + x = np.vstack([x, 2*x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_polyint(self): + # check exceptions + assert_raises(TypeError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(TypeError, poly.polyint, [0], axis=.5) + assert_raises(TypeError, poly.polyint, [1, 1], 1.) 
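+ # note the final case above: the integration order m must be an actual
+ # integer, so even the integral-valued float 1. raises TypeError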
+ + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_polyder(self): + # check exceptions + assert_raises(TypeError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class 
TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc: + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = 
poly.polyfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + # + coef2d = poly.polyfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + yw[0::2] = 0 + wcoef3 = poly.polyfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex x values whose squares + # sum to zero. + x = [1, 1j, -1, -1j] + assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) + assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = poly.polyfit(x, y, 4) + assert_almost_equal(poly.polyval(x, coef1), y) + coef2 = poly.polyfit(x, y, [0, 2, 4]) + assert_almost_equal(poly.polyval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + def test_polytrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, poly.polytrim, coef, -1) + + # Test results + assert_equal(poly.polytrim(coef), coef[:-1]) + assert_equal(poly.polytrim(coef, 1), coef[:-3]) + assert_equal(poly.polytrim(coef, 2), [0]) + + def test_polyline(self): + assert_equal(poly.polyline(3, 4), [3, 4]) + + def test_polyline_zero(self): + assert_equal(poly.polyline(3, 0), [3]) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_polyutils.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_polyutils.py new file mode 100644 index 0000000000000000000000000000000000000000..176353a28af890e7e521ceb19d6f9cf42eb8b2cc --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_polyutils.py @@ -0,0 +1,125 @@ +"""Tests for polyutils module. 
+ +""" +import numpy as np +import numpy.polynomial.polyutils as pu +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +class TestMisc: + + def test_trimseq(self): + tgt = [1] + for num_trailing_zeros in range(5): + res = pu.trimseq([1] + [0] * num_trailing_zeros) + assert_equal(res, tgt) + + def test_trimseq_empty_input(self): + for empty_seq in [[], np.array([], dtype=np.int32)]: + assert_equal(pu.trimseq(empty_seq), empty_seq) + + def test_as_series(self): + # check exceptions + assert_raises(ValueError, pu.as_series, [[]]) + assert_raises(ValueError, pu.as_series, [[[1, 2]]]) + assert_raises(ValueError, pu.as_series, [[1], ['a']]) + # check common types + types = ['i', 'd', 'O'] + for i in range(len(types)): + for j in range(i): + ci = np.ones(1, types[i]) + cj = np.ones(1, types[j]) + [resi, resj] = pu.as_series([ci, cj]) + assert_(resi.dtype.char == resj.dtype.char) + assert_(resj.dtype.char == types[i]) + + def test_trimcoef(self): + coef = [2, -1, 1, 0] + # Test exceptions + assert_raises(ValueError, pu.trimcoef, coef, -1) + # Test results + assert_equal(pu.trimcoef(coef), coef[:-1]) + assert_equal(pu.trimcoef(coef, 1), coef[:-3]) + assert_equal(pu.trimcoef(coef, 2), [0]) + + def test_vander_nd_exception(self): + # n_dims != len(points) + assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) + # n_dims != len(degrees) + assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) + # n_dims == 0 + assert_raises(ValueError, pu._vander_nd, (), (), []) + + def test_div_zerodiv(self): + # c2[-1] == 0 + assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) + + def test_pow_too_large(self): + # power > maxpower + assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) + +class TestDomain: + + def test_getdomain(self): + # test for real values + x = [1, 10, 3, -1] + tgt = [-1, 10] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + # test for complex values + x = [1 + 1j, 1 - 1j, 0, 2] + tgt = [-1j, 2 + 1j] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + def test_mapdomain(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = dom2 + res = pu.mapdomain(dom1, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = dom2 + x = dom1 + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for multidimensional arrays + dom1 = [0, 4] + dom2 = [1, 3] + tgt = np.array([dom2, dom2]) + x = np.array([dom1, dom1]) + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test that subtypes are preserved. + class MyNDArray(np.ndarray): + pass + + dom1 = [0, 4] + dom2 = [1, 3] + x = np.array([dom1, dom1]).view(MyNDArray) + res = pu.mapdomain(x, dom1, dom2) + assert_(isinstance(res, MyNDArray)) + + def test_mapparms(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = [1, .5] + res = pu. 
mapparms(dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = [-1 + 1j, 1 - 1j] + res = pu.mapparms(dom1, dom2) + assert_almost_equal(res, tgt) diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_printing.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_printing.py new file mode 100644 index 0000000000000000000000000000000000000000..2b583a438b40fc3ca3164bd78b0cd498df646fba --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_printing.py @@ -0,0 +1,552 @@ +from math import nan, inf +import pytest +from numpy._core import array, arange, printoptions +import numpy.polynomial as poly +from numpy.testing import assert_equal, assert_ + +# For testing polynomial printing with object arrays +from fractions import Fraction +from decimal import Decimal + + +class TestStrUnicodeSuperSubscripts: + + @pytest.fixture(scope='class', autouse=True) + def use_unicode(self): + poly.set_default_printstyle('unicode') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"), + ([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"), + (arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + " + "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + " + "11.0·x¹¹")), + )) + def test_polynomial_str(self, inp, tgt): + p = poly.Polynomial(inp) + res = str(p) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"), + (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + " + "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + " + "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"), + (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + " + "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + " + "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"), + (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + " + "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + " + "9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"), + (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + " + "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + " + "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n" + "11.0·He₁₁(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"), + (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + " + "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + " + "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = 
str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0·x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0·(-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestStrAscii: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 x + 3.0 x**2"), + ([-1, 0, 3, -1], "-1.0 + 0.0 x + 3.0 x**2 - 1.0 x**3"), + (arange(12), ("0.0 + 1.0 x + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + " + "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + " + "9.0 x**9 + 10.0 x**10 + 11.0 x**11")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"), + (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + " + "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + " + "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n" + "11.0 T_11(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"), + (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + " + "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + " + "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n" + "11.0 P_11(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"), + (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + " + "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + " + "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n" + "11.0 H_11(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"), + (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 He_3(x) + " + "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + " + "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n" + "10.0 He_10(x) + 11.0 He_11(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"), + (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + " + "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + " + "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n" + "11.0 L_11(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0 x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0 (-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestLinebreaking: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_single_line_one_less(self): + # With 
'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 74) + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123]) + assert_equal(len(str(p)), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 123.0 x**4' + )) + + def test_num_chars_is_linewidth(self): + # len(str(p)) == default linewidth == 75 + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234]) + assert_equal(len(str(p)), 75) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 +\n1234.0 x**4' + )) + + def test_first_linebreak_multiline_one_less_than_linewidth(self): + # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74 + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678, 1, 12345678] + ) + assert_equal(len(str(p).split('\n')[0]), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5' + )) + + def test_first_linebreak_multiline_on_linewidth(self): + # First line is one character longer than previous test + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678.12, 1, 12345678] + ) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5' + )) + + @pytest.mark.parametrize(('lw', 'tgt'), ( + (75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + (45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n' + '900.0 x**9')), + (132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + )) + def test_linewidth_printoption(self, lw, tgt): + p = poly.Polynomial( + [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900] + ) + with printoptions(linewidth=lw): + assert_equal(str(p), tgt) + for line in str(p).split('\n'): + assert_(len(line) < lw) + + +def test_set_default_printoptions(): + p = poly.Polynomial([1, 2, 3]) + c = poly.Chebyshev([1, 2, 3]) + poly.set_default_printstyle('ascii') + assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)") + poly.set_default_printstyle('unicode') + assert_equal(str(p), "1.0 + 2.0·x + 3.0·x²") + assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)") + with pytest.raises(ValueError): + poly.set_default_printstyle('invalid_input') + + +def test_complex_coefficients(): + """Test both numpy and built-in complex.""" + coefs = [0+1j, 1+1j, -2+2j, 3+0j] + # numpy complex + p1 = poly.Polynomial(coefs) + # Python complex + p2 = poly.Polynomial(array(coefs, dtype=object)) + poly.set_default_printstyle('unicode') + assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³") + assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³") + poly.set_default_printstyle('ascii') + assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3") + assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3") + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), ( + "1/2 + 3/4·x" + )), + (array([1, 2, Fraction(5, 7)], dtype=object), ( + "1 + 2·x + 5/7·x²" + )), + (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), ( + "1.00 + 2.2·x + 3·x²" + )), +)) +def test_numeric_object_coefficients(coefs, tgt): + p = poly.Polynomial(coefs) + 
poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'), + (array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'), +)) +def test_nonnumeric_object_coefficients(coefs, tgt): + """ + Test coef fallback for object arrays of non-numeric coefficients. + """ + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +class TestFormat: + def test_format_unicode(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal(format(p, 'unicode'), "1.0 + 2.0·x + 0.0·x² - 1.0·x³") + + def test_format_ascii(self): + poly.set_default_printstyle('unicode') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal( + format(p, 'ascii'), "1.0 + 2.0 x + 0.0 x**2 - 1.0 x**3" + ) + + def test_empty_formatstr(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 3]) + assert_equal(format(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(f"{p}", "1.0 + 2.0 x + 3.0 x**2") + + def test_bad_formatstr(self): + p = poly.Polynomial([1, 2, 0, -1]) + with pytest.raises(ValueError): + format(p, '.2f') + + +@pytest.mark.parametrize(('poly', 'tgt'), ( + (poly.Polynomial, '1.0 + 2.0·z + 3.0·z²'), + (poly.Chebyshev, '1.0 + 2.0·T₁(z) + 3.0·T₂(z)'), + (poly.Hermite, '1.0 + 2.0·H₁(z) + 3.0·H₂(z)'), + (poly.HermiteE, '1.0 + 2.0·He₁(z) + 3.0·He₂(z)'), + (poly.Laguerre, '1.0 + 2.0·L₁(z) + 3.0·L₂(z)'), + (poly.Legendre, '1.0 + 2.0·P₁(z) + 3.0·P₂(z)'), +)) +def test_symbol(poly, tgt): + p = poly([1, 2, 3], symbol='z') + assert_equal(f"{p:unicode}", tgt) + + +class TestRepr: + def test_polynomial_repr(self): + res = repr(poly.Polynomial([0, 1])) + tgt = ( + "Polynomial([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_chebyshev_repr(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = ( + "Chebyshev([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = ( + "Legendre([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = ( + "Hermite([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = ( + "HermiteE([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = ( + "Laguerre([0., 1.], domain=[0., 1.], window=[0., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + +class TestLatexRepr: + """Test the latex repr used by Jupyter""" + + @staticmethod + def as_latex(obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. 
Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x, parens=False: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = poly.Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + + def test_basis_func(self): + p = poly.Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + + def test_multichar_basis_func(self): + p = poly.HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + def test_symbol_basic(self): + # default input + p = poly.Polynomial([1, 2, 3], symbol='z') + assert_equal(self.as_latex(p), + r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,' + r'\left(1.0 + z\right)^{2}$' + ), + ) + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,' + r'\left(2.0z\right)^{2}$' + ), + ) + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,' + r'\left(1.0 + 2.0z\right)^{2}$' + ), + ) + + def test_numeric_object_coefficients(self): + coefs = array([Fraction(1, 2), Fraction(1)]) + p = poly.Polynomial(coefs) + assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$') + +SWITCH_TO_EXP = ( + '1.0 + (1.0e-01) x + (1.0e-02) x**2', + '1.2 + (1.2e-01) x + (1.2e-02) x**2', + '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', + '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', + '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', + '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4', + '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5', + '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + +class TestPrintOptions: + """ + Test the output is properly configured via printoptions. + The exponential notation is enabled automatically when the values + are too small or too large. 
+ """ + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_str(self): + p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' + '+ (1.42857143e+08) x**3') + + with printoptions(precision=3): + assert_equal(str(p), '0.5 + 0.143 x + 14285714.286 x**2 ' + '+ (1.429e+08) x**3') + + def test_latex(self): + p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' + r'\text{14285714.28571429}\,x^{2} + ' + r'\text{(1.42857143e+08)}\,x^{3}$') + + with printoptions(precision=3): + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' + r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') + + def test_fixed(self): + p = poly.Polynomial([1/2]) + assert_equal(str(p), '0.5') + + with printoptions(floatmode='fixed'): + assert_equal(str(p), '0.50000000') + + with printoptions(floatmode='fixed', precision=4): + assert_equal(str(p), '0.5000') + + def test_switch_to_exp(self): + for i, s in enumerate(SWITCH_TO_EXP): + with printoptions(precision=i): + p = poly.Polynomial([1.23456789*10**-i + for i in range(i//2+3)]) + assert str(p).replace('\n', ' ') == s + + def test_non_finite(self): + p = poly.Polynomial([nan, inf]) + assert str(p) == 'nan + inf x' + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' + with printoptions(nanstr='NAN', infstr='INF'): + assert str(p) == 'NAN + INF x' + assert p._repr_latex_() == \ + r'$x \mapsto \text{NAN} + \text{INF}\,x$' diff --git a/phivenv/Lib/site-packages/numpy/polynomial/tests/test_symbol.py b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_symbol.py new file mode 100644 index 0000000000000000000000000000000000000000..be64e964e76d71f38424bc2da82d180b0592f3b4 --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/polynomial/tests/test_symbol.py @@ -0,0 +1,216 @@ +""" +Tests related to the ``symbol`` attribute of the ABCPolyBase class. +""" + +import pytest +import numpy.polynomial as poly +from numpy._core import array +from numpy.testing import assert_equal, assert_raises, assert_ + + +class TestInit: + """ + Test polynomial creation with symbol kwarg. + """ + c = [1, 2, 3] + + def test_default_symbol(self): + p = poly.Polynomial(self.c) + assert_equal(p.symbol, 'x') + + @pytest.mark.parametrize(('bad_input', 'exception'), ( + ('', ValueError), + ('3', ValueError), + (None, TypeError), + (1, TypeError), + )) + def test_symbol_bad_input(self, bad_input, exception): + with pytest.raises(exception): + p = poly.Polynomial(self.c, symbol=bad_input) + + @pytest.mark.parametrize('symbol', ( + 'x', + 'x_1', + 'A', + 'xyz', + 'β', + )) + def test_valid_symbols(self, symbol): + """ + Values for symbol that should pass input validation. + """ + p = poly.Polynomial(self.c, symbol=symbol) + assert_equal(p.symbol, symbol) + + def test_property(self): + """ + 'symbol' attribute is read only. 
+ """ + p = poly.Polynomial(self.c, symbol='x') + with pytest.raises(AttributeError): + p.symbol = 'z' + + def test_change_symbol(self): + p = poly.Polynomial(self.c, symbol='y') + # Create new polynomial from p with different symbol + pt = poly.Polynomial(p.coef, symbol='t') + assert_equal(pt.symbol, 't') + + +class TestUnaryOperators: + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_neg(self): + n = -self.p + assert_equal(n.symbol, 'z') + + def test_scalarmul(self): + out = self.p * 10 + assert_equal(out.symbol, 'z') + + def test_rscalarmul(self): + out = 10 * self.p + assert_equal(out.symbol, 'z') + + def test_pow(self): + out = self.p ** 3 + assert_equal(out.symbol, 'z') + + +@pytest.mark.parametrize( + 'rhs', + ( + poly.Polynomial([4, 5, 6], symbol='z'), + array([4, 5, 6]), + ), +) +class TestBinaryOperatorsSameSymbol: + """ + Ensure symbol is preserved for numeric operations on polynomials with + the same symbol + """ + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_add(self, rhs): + out = self.p + rhs + assert_equal(out.symbol, 'z') + + def test_sub(self, rhs): + out = self.p - rhs + assert_equal(out.symbol, 'z') + + def test_polymul(self, rhs): + out = self.p * rhs + assert_equal(out.symbol, 'z') + + def test_divmod(self, rhs): + for out in divmod(self.p, rhs): + assert_equal(out.symbol, 'z') + + def test_radd(self, rhs): + out = rhs + self.p + assert_equal(out.symbol, 'z') + + def test_rsub(self, rhs): + out = rhs - self.p + assert_equal(out.symbol, 'z') + + def test_rmul(self, rhs): + out = rhs * self.p + assert_equal(out.symbol, 'z') + + def test_rdivmod(self, rhs): + for out in divmod(rhs, self.p): + assert_equal(out.symbol, 'z') + + +class TestBinaryOperatorsDifferentSymbol: + p = poly.Polynomial([1, 2, 3], symbol='x') + other = poly.Polynomial([4, 5, 6], symbol='y') + ops = (p.__add__, p.__sub__, p.__mul__, p.__floordiv__, p.__mod__) + + @pytest.mark.parametrize('f', ops) + def test_binops_fails(self, f): + assert_raises(ValueError, f, self.other) + + +class TestEquality: + p = poly.Polynomial([1, 2, 3], symbol='x') + + def test_eq(self): + other = poly.Polynomial([1, 2, 3], symbol='x') + assert_(self.p == other) + + def test_neq(self): + other = poly.Polynomial([1, 2, 3], symbol='y') + assert_(not self.p == other) + + +class TestExtraMethods: + """ + Test other methods for manipulating/creating polynomial objects. 
+ """ + p = poly.Polynomial([1, 2, 3, 0], symbol='z') + + def test_copy(self): + other = self.p.copy() + assert_equal(other.symbol, 'z') + + def test_trim(self): + other = self.p.trim() + assert_equal(other.symbol, 'z') + + def test_truncate(self): + other = self.p.truncate(2) + assert_equal(other.symbol, 'z') + + @pytest.mark.parametrize('kwarg', ( + {'domain': [-10, 10]}, + {'window': [-10, 10]}, + {'kind': poly.Chebyshev}, + )) + def test_convert(self, kwarg): + other = self.p.convert(**kwarg) + assert_equal(other.symbol, 'z') + + def test_integ(self): + other = self.p.integ() + assert_equal(other.symbol, 'z') + + def test_deriv(self): + other = self.p.deriv() + assert_equal(other.symbol, 'z') + + +def test_composition(): + p = poly.Polynomial([3, 2, 1], symbol="t") + q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1") + r = p(q) + assert r.symbol == "λ_1" + + +# +# Class methods that result in new polynomial class instances +# + + +def test_fit(): + x, y = (range(10),)*2 + p = poly.Polynomial.fit(x, y, deg=1, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_froomroots(): + roots = [-2, 2] + p = poly.Polynomial.fromroots(roots, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_identity(): + p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z') + assert_equal(p.symbol, 'z') + + +def test_basis(): + p = poly.Polynomial.basis(3, symbol='z') + assert_equal(p.symbol, 'z') diff --git a/phivenv/Lib/site-packages/numpy/py.typed b/phivenv/Lib/site-packages/numpy/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/phivenv/Lib/site-packages/numpy/version.py b/phivenv/Lib/site-packages/numpy/version.py new file mode 100644 index 0000000000000000000000000000000000000000..ad9e84d3ddcfe54462740bfd11ec61c435290f6d --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/version.py @@ -0,0 +1,11 @@ + +""" +Module to expose more detailed version info for the installed `numpy` +""" +version = "2.0.2" +__version__ = version +full_version = version + +git_revision = "854252ded83e6b9c21c4ee80558d354d8a72484c" +release = 'dev' not in version and '+' not in version +short_version = version.split("+")[0] diff --git a/phivenv/Lib/site-packages/numpy/version.pyi b/phivenv/Lib/site-packages/numpy/version.pyi new file mode 100644 index 0000000000000000000000000000000000000000..6963b37697f5b90e44b74aeabe90768e8299c24f --- /dev/null +++ b/phivenv/Lib/site-packages/numpy/version.pyi @@ -0,0 +1,7 @@ +version: str +__version__: str +full_version: str + +git_revision: str +release: bool +short_version: str